[CRYPTO] blkcipher: Remove alignment restriction on block size
Previously we assumed for convenience that the block size is a multiple of the algorithm's required alignment. With the pending addition of CTR this will no longer be the case, as the block size will be 1 due to it being a stream cipher. However, the alignment requirement will be that of the underlying implementation, which will most likely be greater than 1.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 70613783fc
parent e4c5c6c9b0
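To make the new arithmetic concrete, here is a minimal userspace sketch of the rounding the patch relies on. The ALIGN() macro is re-defined locally so the snippet compiles on its own, and the numbers (block size 1, alignmask 15) are illustrative assumptions for a CTR-like stream cipher, not values taken from this commit.

#include <stdio.h>

/* Local re-definition so the sketch stands alone; the kernel provides
 * its own ALIGN() macro.  The alignment must be a power of two. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bsize = 1;		/* stream cipher: block size of 1 */
	unsigned int alignmask = 15;	/* implementation wants 16-byte alignment */

	/* Scratch buffers are now sized with the block size rounded up to
	 * the alignment, since the block size alone may be smaller. */
	printf("ALIGN(%u, %u) = %u\n", bsize, alignmask + 1,
	       ALIGN(bsize, alignmask + 1));
	return 0;
}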
crypto/algapi.c
@@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
 
-	if (alg->cra_alignmask & alg->cra_blocksize)
-		return -EINVAL;
-
 	if (alg->cra_blocksize > PAGE_SIZE / 8)
 		return -EINVAL;
 
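For context, the deleted test is what used to keep such algorithms out of the API. A small standalone sketch, with a stand-in struct in place of struct crypto_alg (only the two fields the check reads), shows it accepting a conventional 16-byte block cipher while rejecting a blocksize-1 algorithm whose alignmask is 15:

#include <stdio.h>
#include <errno.h>

/* Stand-in with only the two fields the check looks at; this is not the
 * kernel's struct crypto_alg. */
struct fake_alg {
	unsigned int cra_blocksize;
	unsigned int cra_alignmask;
};

/* The deleted test: reject any algorithm whose block size is not a
 * multiple of its alignment (alignmask + 1). */
static int old_check(const struct fake_alg *alg)
{
	if (alg->cra_alignmask & alg->cra_blocksize)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct fake_alg blk = { .cra_blocksize = 16, .cra_alignmask = 3 };
	struct fake_alg ctr = { .cra_blocksize = 1,  .cra_alignmask = 15 };

	printf("16-byte blocks, alignmask 3:  %d\n", old_check(&blk)); /* 0: accepted */
	printf("1-byte blocks, alignmask 15: %d\n", old_check(&ctr)); /* -EINVAL: rejected */
	return 0;
}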
crypto/blkcipher.c
@@ -149,6 +149,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 				      unsigned int alignmask)
 {
 	unsigned int n;
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
 
 	if (walk->buffer)
 		goto ok;
@@ -167,8 +168,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
 					  alignmask + 1);
 	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
-	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
-						 bsize);
+	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);
 
 	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
 
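The spacing change above is easiest to see as plain pointer arithmetic. The sketch below (userspace, illustrative numbers, and without reproducing blkcipher_get_spot()) shows that placing the source copy aligned_bsize bytes after the destination keeps it on an (alignmask + 1)-aligned address, which dst + bsize no longer guarantees once bsize can be smaller than the alignment.

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned int bsize = 1;			/* stream cipher block size */
	unsigned int alignmask = 15;		/* implementation wants 16-byte alignment */
	unsigned int aligned_bsize = ALIGN(bsize, alignmask + 1);
	uintptr_t buffer = 0x1000;		/* pretend kmalloc() returned this */

	uintptr_t dst = ALIGN(buffer, alignmask + 1);
	uintptr_t src_old = dst + bsize;	 /* old spacing: 1 byte past dst, misaligned */
	uintptr_t src_new = dst + aligned_bsize; /* new spacing: stays 16-byte aligned */

	printf("dst=%#lx  src(old)=%#lx  src(new)=%#lx\n",
	       (unsigned long)dst, (unsigned long)src_old, (unsigned long)src_new);
	return 0;
}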
@@ -278,7 +279,9 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 {
 	unsigned bs = crypto_blkcipher_blocksize(tfm);
 	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
-	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
 	u8 *iv;
 
 	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
@@ -287,8 +290,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
 		return -ENOMEM;
 
 	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
-	iv = blkcipher_get_spot(iv, bs) + bs;
-	iv = blkcipher_get_spot(iv, bs) + bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
 	iv = blkcipher_get_spot(iv, ivsize);
 
 	walk->iv = memcpy(iv, walk->iv, ivsize);
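Finally, the resized IV scratch buffer. A short arithmetic sketch (userspace; illustrative values of bs = 1, ivsize = 16, alignmask = 15, with local ALIGN/MAX macros standing in for the kernel ones) compares the old and new size formulas, since the pointer above is now advanced by aligned_bs twice before the IV itself is placed.

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int bs = 1, ivsize = 16, alignmask = 15;
	unsigned int aligned_bs = ALIGN(bs, alignmask + 1);

	unsigned int old_size = bs * 2 + ivsize + MAX(bs, ivsize) - (alignmask + 1);
	unsigned int new_size = aligned_bs * 2 + ivsize + MAX(aligned_bs, ivsize) -
				(alignmask + 1);

	/* With a 1-byte block size the old formula under-allocates once the
	 * IV pointer is bumped by aligned_bs (16) twice rather than by bs. */
	printf("old size = %u, new size = %u\n", old_size, new_size);
	return 0;
}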