linux_dsm_epyc7002/include/crypto/cbc.h
commit d73d67fbcb ("crypto: cbc - Remove VLA usage") by Kees Cook
In the quest to remove all stack VLA usage from the kernel[1], this
uses the upper bound on the blocksize. Since this is always a cipher
blocksize, use the existing cipher max blocksize.

[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2018-09-04 11:35:03 +08:00
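
For context, the change amounts to a single declaration in
crypto_cbc_decrypt_inplace() below; the "before" line is reconstructed
here for illustration, with MAX_CIPHER_BLOCKSIZE being the compile-time
upper bound on cipher block sizes:

        /* Before: stack VLA sized at runtime by the cipher blocksize */
        u8 last_iv[bsize];

        /* After: fixed-size buffer bounded by the largest cipher blocksize */
        u8 last_iv[MAX_CIPHER_BLOCKSIZE];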

/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _CRYPTO_CBC_H
#define _CRYPTO_CBC_H

#include <crypto/internal/skcipher.h>
#include <linux/string.h>
#include <linux/types.h>
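
/*
 * Encrypt a run of full blocks with distinct source and destination
 * buffers: C[i] = fn(P[i] ^ C[i-1]).  walk->iv holds the chaining value
 * (the IV, then each previous ciphertext block) throughout.
 */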
static inline int crypto_cbc_encrypt_segment(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_xor(iv, src, bsize);
                fn(tfm, iv, dst);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}
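
/*
 * Encrypt in place: the ciphertext overwrites the plaintext, so each
 * freshly written block serves directly as the next chaining value;
 * only the final ciphertext block is copied back into walk->iv.
 */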
static inline int crypto_cbc_encrypt_inplace(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_xor(src, iv, bsize);
                fn(tfm, src, src);
                iv = src;

                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}
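
/*
 * Walk the request and CBC-encrypt it using the single-block encrypt
 * callback fn(), choosing the in-place or separate-buffer helper for
 * each chunk returned by the skcipher walk.
 */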
static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
                                          void (*fn)(struct crypto_skcipher *,
                                                     const u8 *, u8 *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes) {
                if (walk.src.virt.addr == walk.dst.virt.addr)
                        err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
                else
                        err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
                err = skcipher_walk_done(&walk, err);
        }

        return err;
}
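
/*
 * Decrypt a run of full blocks with distinct source and destination
 * buffers: P[i] = fn(C[i]) ^ C[i-1], saving the last ciphertext block
 * in walk->iv as the chaining value for the next chunk.
 */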
static inline int crypto_cbc_decrypt_segment(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                fn(tfm, src, dst);
                crypto_xor(dst, iv, bsize);
                iv = src;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}
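
/*
 * Decrypt in place: work backwards from the last block so that each
 * block's preceding ciphertext is still intact when it is XORed in.
 * The saved copy of the final ciphertext block becomes the IV for the
 * next chunk.
 */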
static inline int crypto_cbc_decrypt_inplace(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 last_iv[MAX_CIPHER_BLOCKSIZE];

        /* Start of the last block. */
        src += nbytes - (nbytes & (bsize - 1)) - bsize;
        memcpy(last_iv, src, bsize);

        for (;;) {
                fn(tfm, src, src);
                if ((nbytes -= bsize) < bsize)
                        break;
                crypto_xor(src, src - bsize, bsize);
                src -= bsize;
        }

        crypto_xor(src, walk->iv, bsize);
        memcpy(walk->iv, last_iv, bsize);

        return nbytes;
}
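
/* Dispatch one chunk of a decryption walk to the appropriate helper. */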
static inline int crypto_cbc_decrypt_blocks(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        if (walk->src.virt.addr == walk->dst.virt.addr)
                return crypto_cbc_decrypt_inplace(walk, tfm, fn);
        else
                return crypto_cbc_decrypt_segment(walk, tfm, fn);
}

#endif /* _CRYPTO_CBC_H */
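
For reference, a caller typically supplies a single-block callback and
lets these helpers do the chaining. A minimal sketch, not part of this
header: the context struct and all cbc_example_* names are illustrative
assumptions, while crypto_skcipher_ctx() and crypto_cipher_encrypt_one()
are existing kernel APIs of this vintage.

#include <crypto/cbc.h>
#include <linux/crypto.h>

/* Hypothetical transform context holding the underlying block cipher. */
struct cbc_example_ctx {
        struct crypto_cipher *child;
};

/* Single-block encrypt callback matching the fn() signature above. */
static void cbc_example_encrypt_one(struct crypto_skcipher *tfm,
                                    const u8 *src, u8 *dst)
{
        struct cbc_example_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* Note: crypto_cipher_encrypt_one() takes (tfm, dst, src). */
        crypto_cipher_encrypt_one(ctx->child, dst, src);
}

/* skcipher .encrypt implementation: the header does the CBC chaining. */
static int cbc_example_encrypt(struct skcipher_request *req)
{
        return crypto_cbc_encrypt_walk(req, cbc_example_encrypt_one);
}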