crypto: arm64/aes-blk - add a non-SIMD fallback for synchronous CTR
To accommodate systems that may disallow use of the NEON in kernel mode
in some circumstances, introduce a C fallback for synchronous AES in CTR
mode, and use it if may_use_simd() returns false.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 5092fcf349
commit e211506979
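For orientation: the new aes-ctr-fallback.h introduced further down is plain CTR mode written in portable C. A minimal, standalone sketch of the construction (not code from this commit — the block-cipher callback stands in for __aes_arm64_encrypt(), and every name here is a placeholder):

#include <stddef.h>
#include <stdint.h>

#define CTR_BLOCK_SIZE 16

/* Placeholder for a single-block encryptor such as __aes_arm64_encrypt(). */
typedef void (*block_enc_fn)(const void *key, uint8_t out[CTR_BLOCK_SIZE],
			     const uint8_t in[CTR_BLOCK_SIZE]);

/* Big-endian counter increment, as crypto_inc() does in the kernel. */
static void ctr_inc(uint8_t ctr[CTR_BLOCK_SIZE])
{
	int i;

	for (i = CTR_BLOCK_SIZE - 1; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

/* CTR mode: dst[i] = src[i] ^ E_K(counter), one block at a time.
 * Encrypt and decrypt are the same operation; a trailing partial
 * block simply uses fewer keystream bytes.
 */
static void ctr_crypt(block_enc_fn enc, const void *key, uint8_t *dst,
		      const uint8_t *src, size_t len,
		      uint8_t ctr[CTR_BLOCK_SIZE])
{
	uint8_t ks[CTR_BLOCK_SIZE];
	size_t i, n;

	while (len > 0) {
		enc(key, ks, ctr);		/* keystream = E_K(counter) */
		n = len < CTR_BLOCK_SIZE ? len : CTR_BLOCK_SIZE;
		for (i = 0; i < n; i++)
			dst[i] = src[i] ^ ks[i];
		ctr_inc(ctr);
		dst += n;
		src += n;
		len -= n;
	}
}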
arch/arm64/crypto/Kconfig
@@ -64,15 +64,17 @@ config CRYPTO_AES_ARM64_CE_CCM
 
 config CRYPTO_AES_ARM64_CE_BLK
 	tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
-	depends on ARM64 && KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES_ARM64_CE
+	select CRYPTO_AES_ARM64
 	select CRYPTO_SIMD
 
 config CRYPTO_AES_ARM64_NEON_BLK
 	tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
-	depends on ARM64 && KERNEL_MODE_NEON
+	depends on KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES_ARM64
 	select CRYPTO_AES
 	select CRYPTO_SIMD
 
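Both block-mode options now also select CRYPTO_AES_ARM64, which provides the scalar __aes_arm64_encrypt() routine the new fallback relies on. Dropping "ARM64 &&" from the dependencies loses nothing: this Kconfig file is only sourced on arm64 builds, so the test was redundant.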
arch/arm64/crypto/aes-ctr-fallback.h (new file, 53 lines)
@@ -0,0 +1,53 @@
+/*
+ * Fallback for sync aes(ctr) in contexts where kernel mode NEON
+ * is not allowed
+ *
+ * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
+
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
+static inline int aes_ctr_encrypt_fallback(struct crypto_aes_ctx *ctx,
+					   struct skcipher_request *req)
+{
+	struct skcipher_walk walk;
+	u8 buf[AES_BLOCK_SIZE];
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, true);
+
+	while (walk.nbytes > 0) {
+		u8 *dst = walk.dst.virt.addr;
+		u8 *src = walk.src.virt.addr;
+		int nbytes = walk.nbytes;
+		int tail = 0;
+
+		if (nbytes < walk.total) {
+			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+			tail = walk.nbytes % AES_BLOCK_SIZE;
+		}
+
+		do {
+			int bsize = min(nbytes, AES_BLOCK_SIZE);
+
+			__aes_arm64_encrypt(ctx->key_enc, buf, walk.iv,
+					    6 + ctx->key_length / 4);
+			crypto_xor_cpy(dst, src, buf, bsize);
+			crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
+			dst += AES_BLOCK_SIZE;
+			src += AES_BLOCK_SIZE;
+			nbytes -= AES_BLOCK_SIZE;
+		} while (nbytes > 0);
+
+		err = skcipher_walk_done(&walk, tail);
+	}
+	return err;
+}
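Two details of the fallback are worth spelling out. The round count passed to __aes_arm64_encrypt() is derived from the key length in bytes: 6 + 16/4 = 10 rounds for AES-128, 6 + 24/4 = 12 for AES-192, and 6 + 32/4 = 14 for AES-256. And on any walk chunk other than the last, nbytes is rounded down to whole blocks and the remainder is handed back to skcipher_walk_done() as "tail" so it is revisited in the next iteration; only the final chunk may end in a partial block, which crypto_xor_cpy() handles via bsize = min(nbytes, AES_BLOCK_SIZE).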
arch/arm64/crypto/aes-glue.c
@@ -10,6 +10,7 @@
 
 #include <asm/neon.h>
 #include <asm/hwcap.h>
+#include <asm/simd.h>
 #include <crypto/aes.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/simd.h>
@@ -19,6 +20,7 @@
 #include <crypto/xts.h>
 
 #include "aes-ce-setkey.h"
+#include "aes-ctr-fallback.h"
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 #define MODE "ce"
@@ -249,6 +251,17 @@ static int ctr_encrypt(struct skcipher_request *req)
 	return err;
 }
 
+static int ctr_encrypt_sync(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (!may_use_simd())
+		return aes_ctr_encrypt_fallback(ctx, req);
+
+	return ctr_encrypt(req);
+}
+
 static int xts_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
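Why ctr_encrypt_sync() exists at all: the "__ctr(aes)" variant is wrapped by CRYPTO_SIMD into an async algorithm that can punt to a helper thread when NEON is unavailable, but the bare synchronous "ctr(aes)" instance must complete in whatever context the caller is in, so it needs the scalar fallback. A hypothetical caller, sketched against the 2017-era skcipher API (the helper name and shape are mine, not part of the kernel):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* One-shot, in-place synchronous CTR encryption. Passing
 * CRYPTO_ALG_ASYNC in the mask asks for a !ASYNC (synchronous)
 * "ctr(aes)" instance, i.e. the one whose .encrypt hook is
 * ctr_encrypt_sync() above on arm64.
 */
static int ctr_aes_encrypt_oneshot(const u8 *key, unsigned int keylen,
				   u8 iv[16], u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_skcipher(tfm);
	return err;
}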
@@ -355,8 +368,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.ivsize		= AES_BLOCK_SIZE,
 	.chunksize	= AES_BLOCK_SIZE,
 	.setkey		= skcipher_aes_setkey,
-	.encrypt	= ctr_encrypt,
-	.decrypt	= ctr_encrypt,
+	.encrypt	= ctr_encrypt_sync,
+	.decrypt	= ctr_encrypt_sync,
 }, {
 	.base = {
 		.cra_name		= "__xts(aes)",
@@ -458,11 +471,35 @@ static int mac_init(struct shash_desc *desc)
 	return 0;
 }
 
+static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
+			  u8 dg[], int enc_before, int enc_after)
+{
+	int rounds = 6 + ctx->key_length / 4;
+
+	if (may_use_simd()) {
+		kernel_neon_begin();
+		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
+			       enc_after);
+		kernel_neon_end();
+	} else {
+		if (enc_before)
+			__aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds);
+
+		while (blocks--) {
+			crypto_xor(dg, in, AES_BLOCK_SIZE);
+			in += AES_BLOCK_SIZE;
+
+			if (blocks || enc_after)
+				__aes_arm64_encrypt(ctx->key_enc, dg, dg,
+						    rounds);
+		}
+	}
+}
+
 static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-	int rounds = 6 + tctx->key.key_length / 4;
 
 	while (len > 0) {
 		unsigned int l;
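The scalar branch of mac_do_update() is the textbook CBC-MAC recurrence, dg = E_K(dg ^ m_i), computed with __aes_arm64_encrypt(). The two flags mirror how the NEON path is driven: enc_before pushes a pending digest block through the cipher before any new input is absorbed, and enc_after decides whether the last absorbed block is encrypted right away (as in a final step) or left pending so a subsequent update can keep extending the chain.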
@@ -474,10 +511,8 @@ static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 
 			len %= AES_BLOCK_SIZE;
 
-			kernel_neon_begin();
-			aes_mac_update(p, tctx->key.key_enc, rounds, blocks,
-				       ctx->dg, (ctx->len != 0), (len != 0));
-			kernel_neon_end();
+			mac_do_update(&tctx->key, p, blocks, ctx->dg,
+				      (ctx->len != 0), (len != 0));
 
 			p += blocks * AES_BLOCK_SIZE;
 
@@ -505,11 +540,8 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-	int rounds = 6 + tctx->key.key_length / 4;
 
-	kernel_neon_begin();
-	aes_mac_update(NULL, tctx->key.key_enc, rounds, 0, ctx->dg, 1, 0);
-	kernel_neon_end();
+	mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
 
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 
@@ -520,7 +552,6 @@ static int cmac_final(struct shash_desc *desc, u8 *out)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
-	int rounds = 6 + tctx->key.key_length / 4;
 	u8 *consts = tctx->consts;
 
 	if (ctx->len != AES_BLOCK_SIZE) {
@@ -528,9 +559,7 @@ static int cmac_final(struct shash_desc *desc, u8 *out)
 		consts += AES_BLOCK_SIZE;
 	}
 
-	kernel_neon_begin();
-	aes_mac_update(consts, tctx->key.key_enc, rounds, 1, ctx->dg, 0, 1);
-	kernel_neon_end();
+	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);
 
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 