crypto: riscv/sha256 - implement library instead of shash

Instead of providing crypto_shash algorithms for the arch-optimized
SHA-256 code, implement the SHA-256 library.  This is much simpler,
it makes the SHA-256 library functions arch-optimized, and it fixes
the longstanding issue where the arch-optimized SHA-256 was disabled
by default.  SHA-256 remains available through crypto_shash, but
individual architectures no longer need to handle it.

To match sha256_blocks_arch(), change the type of the nblocks parameter
of the assembly function from int to size_t.  The assembly function
already treated it as size_t.
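For illustration, a minimal sketch of what this means for a kernel
caller (the demo function is hypothetical; the one-shot sha256() helper
is declared in <crypto/sha2.h>):

#include <crypto/sha2.h>

/* Hypothetical caller: one-shot digest via the library interface. */
static void sha256_demo(const u8 *data, unsigned int len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	/*
	 * The library dispatches to sha256_blocks_arch(), so the riscv64
	 * vector crypto code is used automatically when available; no
	 * crypto_shash TFM needs to be allocated.
	 */
	sha256(data, len, digest);
}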

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author: Eric Biggers
Date: 2025-04-28 10:00:32 -07:00
Committer: Herbert Xu
Parent: 1a49c573bf
Commit: bf52d93865
7 changed files with 74 additions and 141 deletions

--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig

@@ -28,17 +28,6 @@ config CRYPTO_GHASH_RISCV64
 	  Architecture: riscv64 using:
 	  - Zvkg vector crypto extension
 
-config CRYPTO_SHA256_RISCV64
-	tristate "Hash functions: SHA-224 and SHA-256"
-	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
-	select CRYPTO_SHA256
-	help
-	  SHA-224 and SHA-256 secure hash algorithm (FIPS 180)
-
-	  Architecture: riscv64 using:
-	  - Zvknha or Zvknhb vector crypto extensions
-	  - Zvkb vector crypto extension
-
 config CRYPTO_SHA512_RISCV64
 	tristate "Hash functions: SHA-384 and SHA-512"
 	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO

--- a/arch/riscv/crypto/Makefile
+++ b/arch/riscv/crypto/Makefile

@@ -7,9 +7,6 @@ aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \
 
 obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o
 ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o
 
-obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
-sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
-
 obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o
 sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o

--- a/arch/riscv/crypto/sha256-riscv64-glue.c
+++ /dev/null

@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256 and SHA-224 using the RISC-V vector crypto extensions
- *
- * Copyright (C) 2022 VRULL GmbH
- * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
- *
- * Copyright (C) 2023 SiFive, Inc.
- * Author: Jerry Shih <jerry.shih@sifive.com>
- */
-
-#include <asm/simd.h>
-#include <asm/vector.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha256_base.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/*
- * Note: the asm function only uses the 'state' field of struct sha256_state.
- * It is assumed to be the first field.
- */
-asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
-	struct crypto_sha256_state *state, const u8 *data, int num_blocks);
-
-static void sha256_block(struct crypto_sha256_state *state, const u8 *data,
-			 int num_blocks)
-{
-	/*
-	 * Ensure struct crypto_sha256_state begins directly with the SHA-256
-	 * 256-bit internal state, as this is what the asm function expects.
-	 */
-	BUILD_BUG_ON(offsetof(struct crypto_sha256_state, state) != 0);
-
-	if (crypto_simd_usable()) {
-		kernel_vector_begin();
-		sha256_transform_zvknha_or_zvknhb_zvkb(state, data, num_blocks);
-		kernel_vector_end();
-	} else
-		sha256_transform_blocks(state, data, num_blocks);
-}
-
-static int riscv64_sha256_update(struct shash_desc *desc, const u8 *data,
-				 unsigned int len)
-{
-	return sha256_base_do_update_blocks(desc, data, len, sha256_block);
-}
-
-static int riscv64_sha256_finup(struct shash_desc *desc, const u8 *data,
-				unsigned int len, u8 *out)
-{
-	sha256_base_do_finup(desc, data, len, sha256_block);
-	return sha256_base_finish(desc, out);
-}
-
-static int riscv64_sha256_digest(struct shash_desc *desc, const u8 *data,
-				 unsigned int len, u8 *out)
-{
-	return sha256_base_init(desc) ?:
-	       riscv64_sha256_finup(desc, data, len, out);
-}
-
-static struct shash_alg riscv64_sha256_algs[] = {
-	{
-		.init = sha256_base_init,
-		.update = riscv64_sha256_update,
-		.finup = riscv64_sha256_finup,
-		.digest = riscv64_sha256_digest,
-		.descsize = sizeof(struct crypto_sha256_state),
-		.digestsize = SHA256_DIGEST_SIZE,
-		.base = {
-			.cra_blocksize = SHA256_BLOCK_SIZE,
-			.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				     CRYPTO_AHASH_ALG_FINUP_MAX,
-			.cra_priority = 300,
-			.cra_name = "sha256",
-			.cra_driver_name = "sha256-riscv64-zvknha_or_zvknhb-zvkb",
-			.cra_module = THIS_MODULE,
-		},
-	}, {
-		.init = sha224_base_init,
-		.update = riscv64_sha256_update,
-		.finup = riscv64_sha256_finup,
-		.descsize = sizeof(struct crypto_sha256_state),
-		.digestsize = SHA224_DIGEST_SIZE,
-		.base = {
-			.cra_blocksize = SHA224_BLOCK_SIZE,
-			.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
-				     CRYPTO_AHASH_ALG_FINUP_MAX,
-			.cra_priority = 300,
-			.cra_name = "sha224",
-			.cra_driver_name = "sha224-riscv64-zvknha_or_zvknhb-zvkb",
-			.cra_module = THIS_MODULE,
-		},
-	},
-};
-
-static int __init riscv64_sha256_mod_init(void)
-{
-	/* Both zvknha and zvknhb provide the SHA-256 instructions. */
-	if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
-	     riscv_isa_extension_available(NULL, ZVKNHB)) &&
-	    riscv_isa_extension_available(NULL, ZVKB) &&
-	    riscv_vector_vlen() >= 128)
-		return crypto_register_shashes(riscv64_sha256_algs,
-					       ARRAY_SIZE(riscv64_sha256_algs));
-
-	return -ENODEV;
-}
-
-static void __exit riscv64_sha256_mod_exit(void)
-{
-	crypto_unregister_shashes(riscv64_sha256_algs,
-				  ARRAY_SIZE(riscv64_sha256_algs));
-}
-
-module_init(riscv64_sha256_mod_init);
-module_exit(riscv64_sha256_mod_exit);
-
-MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha224");

--- a/arch/riscv/lib/crypto/Kconfig
+++ b/arch/riscv/lib/crypto/Kconfig

@@ -6,3 +6,10 @@ config CRYPTO_CHACHA_RISCV64
 	default CRYPTO_LIB_CHACHA
 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
 	select CRYPTO_LIB_CHACHA_GENERIC
+
+config CRYPTO_SHA256_RISCV64
+	tristate
+	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	default CRYPTO_LIB_SHA256
+	select CRYPTO_ARCH_HAVE_LIB_SHA256
+	select CRYPTO_LIB_SHA256_GENERIC

--- a/arch/riscv/lib/crypto/Makefile
+++ b/arch/riscv/lib/crypto/Makefile

@@ -2,3 +2,6 @@
 obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
 chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
+
+obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
+sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o

--- a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S
+++ b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S

@@ -106,8 +106,8 @@
 	sha256_4rounds	\last, \k3, W3, W0, W1, W2
 .endm
 
-// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[8], const u8 *data,
-//					       int num_blocks);
+// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[SHA256_STATE_WORDS],
+//					       const u8 *data, size_t nblocks);
 SYM_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb)
 
 	// Load the round constants into K0-K15.

--- /dev/null
+++ b/arch/riscv/lib/crypto/sha256.c

@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 (RISC-V accelerated)
+ *
+ * Copyright (C) 2022 VRULL GmbH
+ * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
+ *
+ * Copyright (C) 2023 SiFive, Inc.
+ * Author: Jerry Shih <jerry.shih@sifive.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/vector.h>
+#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
+	u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+			const u8 *data, size_t nblocks)
+{
+	if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
+		kernel_vector_begin();
+		sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
+		kernel_vector_end();
+	} else {
+		sha256_blocks_generic(state, data, nblocks);
+	}
+}
+EXPORT_SYMBOL(sha256_blocks_arch);
+
+bool sha256_is_arch_optimized(void)
+{
+	return static_key_enabled(&have_extensions);
+}
+EXPORT_SYMBOL(sha256_is_arch_optimized);
+
+static int __init riscv64_sha256_mod_init(void)
+{
+	/* Both zvknha and zvknhb provide the SHA-256 instructions. */
+	if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
+	     riscv_isa_extension_available(NULL, ZVKNHB)) &&
+	    riscv_isa_extension_available(NULL, ZVKB) &&
+	    riscv_vector_vlen() >= 128)
+		static_branch_enable(&have_extensions);
+	return 0;
+}
+arch_initcall(riscv64_sha256_mod_init);
+
+static void __exit riscv64_sha256_mod_exit(void)
+{
+}
+module_exit(riscv64_sha256_mod_exit);
+
+MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
+MODULE_LICENSE("GPL");