--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -18,13 +18,13 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha512_blocks_arch(u64 *digest, const void *data,
+ unsigned int num_blks);
static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
int blocks)
{
- sha512_block_data_order(sst->state, src, blocks);
+ sha512_blocks_arch(sst->state, src, blocks);
}
static int sha512_update(struct shash_desc *desc, const u8 *data,
--- a/arch/arm64/lib/crypto/Kconfig
+++ b/arch/arm64/lib/crypto/Kconfig
@@ -17,3 +17,4 @@ config CRYPTO_SHA256_ARM64
tristate
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
--- a/arch/arm64/lib/crypto/sha2-armv8.pl
+++ b/arch/arm64/lib/crypto/sha2-armv8.pl
@@ -95,7 +95,7 @@ if ($output =~ /512/) {
$reg_t="w";
}
-$func="sha${BITS}_block_data_order";
+$func="sha${BITS}_blocks_arch";
($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
--- a/arch/arm64/lib/crypto/sha256.c
+++ b/arch/arm64/lib/crypto/sha256.c
@@ -6,12 +6,12 @@
*/
#include <asm/neon.h>
#include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>
-asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks);
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
@@ -20,11 +20,11 @@ asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon) && crypto_simd_usable()) {
+ static_branch_likely(&have_neon)) {
if (static_branch_likely(&have_ce)) {
do {
size_t rem;
@@ -42,10 +42,10 @@ void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
kernel_neon_end();
}
} else {
- sha256_block_data_order(state, data, nblocks);
+ sha256_blocks_arch(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
bool sha256_is_arch_optimized(void)
{
Add CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD and a SIMD block function so that
the caller can decide whether to use SIMD.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/arm64/crypto/sha512-glue.c     |  6 +++---
 arch/arm64/lib/crypto/Kconfig       |  1 +
 arch/arm64/lib/crypto/sha2-armv8.pl |  2 +-
 arch/arm64/lib/crypto/sha256.c      | 14 +++++++-------
 4 files changed, 12 insertions(+), 11 deletions(-)
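
(Not part of the patch: a minimal sketch of how a caller could use the split
API now that the arch exports both entry points.  The wrapper name
sha256_do_blocks() and the exact dispatch policy are illustrative assumptions,
not code from this series: when the architecture selects
CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD, the caller checks crypto_simd_usable()
before routing to sha256_blocks_simd(), and otherwise falls back to the
always-safe scalar sha256_blocks_arch().)

/* Illustrative caller-side dispatch; hypothetical helper, not from this patch. */
#include <linux/types.h>
#include <crypto/internal/sha2.h>
#include <crypto/internal/simd.h>

static void sha256_do_blocks(u32 state[SHA256_STATE_WORDS],
			     const u8 *data, size_t nblocks)
{
	if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
	    crypto_simd_usable())
		/* SIMD is allowed in this context: may use NEON/CE internally. */
		sha256_blocks_simd(state, data, nblocks);
	else
		/* Scalar assembly path, safe in any context. */
		sha256_blocks_arch(state, data, nblocks);
}

In the kernel proper this kind of selection would live in the generic SHA-256
library code rather than in each arch glue file; the point here is only that
the SIMD-usability decision moves to the caller.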