cfcd2271a9
The mv_cesa_queue_req() function calls crypto_enqueue_request() to
enqueue a request. In the normal case (i.e. the queue isn't full), this
function returns -EINPROGRESS. The current Marvell CESA crypto driver
takes this into account and cleans up the request only if an error
occurred, i.e. if the return value is not -EINPROGRESS.
Unfortunately, this causes problems with
CRYPTO_TFM_REQ_MAY_BACKLOG-flagged requests. When such a request is
passed to crypto_enqueue_request() and the queue is full,
crypto_enqueue_request() returns -EBUSY but keeps the request
enqueued on the backlog nonetheless. The Marvell CESA driver did not
handle this situation properly and cleaned up the request anyway. When
the request was later taken off the backlog and actually processed, the
kernel crashed because the driver's internal data structures for this
request had already been cleaned up.
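
Before the fix, the call sites decided on cleanup from the raw return
value alone, roughly along these lines (a simplified sketch of the old
pattern, not the literal diff):

    ret = mv_cesa_queue_req(&req->base);
    if (ret && ret != -EINPROGRESS)
            mv_cesa_ablkcipher_cleanup(req); /* also runs for -EBUSY backlogged requests */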
To avoid this situation, this commit adds a
mv_cesa_req_needs_cleanup() helper function which indicates whether
the request needs to be cleaned up after a call to
crypto_enqueue_request(). This helper makes it possible to do the
cleanup only in the appropriate cases, and all call sites of
mv_cesa_queue_req() are fixed to use this new helper function.
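
For illustration, the check the helper performs boils down to the
following (a sketch under the assumption that the helper lives next to
mv_cesa_queue_req(); see the driver headers for the exact
implementation):

    static inline bool
    mv_cesa_req_needs_cleanup(struct crypto_async_request *req, int ret)
    {
            /* Queued normally: the engine owns it, nothing to clean up. */
            if (ret == -EINPROGRESS)
                    return false;

            /*
             * Queue full, but the request is backlog-capable: it stays
             * enqueued on the backlog and will be processed later, so it
             * must not be cleaned up here either.
             */
            if (ret == -EBUSY && (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                    return false;

            /* The request was not queued at all: clean it up. */
            return true;
    }

The call sites in the file below then read:

    ret = mv_cesa_queue_req(&req->base);
    if (mv_cesa_req_needs_cleanup(&req->base, ret))
            mv_cesa_ablkcipher_cleanup(req);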
Reported-by: Vincent Donnefort <vdonnefort@gmail.com>
Fixes: db509a4533 ("crypto: marvell/cesa - add TDMA support")
Cc: <stable@vger.kernel.org> # v4.2+
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Tested-by: Vincent Donnefort <vdonnefort@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior <sebastian at breakpoint dot cc>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
                                 struct ablkcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_ablkcipher_dma_cleanup(req);
}

static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;
        size_t len = min_t(size_t, req->nbytes - sreq->offset,
                           CESA_SA_SRAM_PAYLOAD_SIZE);

        len = sg_pcopy_to_buffer(req->src, creq->src_nents,
                                 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                 len, sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
        if (!sreq->skip_ctx) {
                memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else {
                memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc));
        }

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
                                          u32 status)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;
        size_t len;

        len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
                                   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                   sreq->size, sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->nbytes)
                return -EINPROGRESS;

        return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
                                      u32 status)
{
        struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
        struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;
        int ret;

        if (creq->req.base.type == CESA_DMA_REQ)
                ret = mv_cesa_dma_process(&creq->req.dma, status);
        else
                ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

        if (ret)
                return ret;

        memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
               crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

        return 0;
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
        struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->req.dma);
        else
                mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct mv_cesa_tdma_req *dreq = &creq->req.dma;

        mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;

        sreq->size = 0;
        sreq->offset = 0;
        mv_cesa_adjust_op(engine, &sreq->op);
        memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
                                              struct mv_cesa_engine *engine)
{
        struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
        creq->req.base.engine = engine;

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_ablkcipher_dma_prepare(ablkreq);
        else
                mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
        struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

        mv_cesa_ablkcipher_cleanup(ablkreq);
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
        .step = mv_cesa_ablkcipher_step,
        .process = mv_cesa_ablkcipher_process,
        .prepare = mv_cesa_ablkcipher_prepare,
        .cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

        return 0;
}

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = crypto_aes_expand_key(&ctx->aes, key, len);
        if (ret) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] =
                        cpu_to_le32(ctx->aes.key_enc[offset + i]);

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 tmp[DES_EXPKEY_WORDS];
        int ret;

        if (len != DES_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        ret = des_ekey(tmp, key);
        if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);

        if (len != DES3_EDE_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}

static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
                                           const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_tdma_req *dreq = &creq->req.dma;
        struct mv_cesa_ablkcipher_dma_iter iter;
        struct mv_cesa_tdma_chain chain;
        bool skip_ctx = false;
        int ret;

        dreq->base.type = CESA_DMA_REQ;
        dreq->chain.first = NULL;
        dreq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&chain);
        mv_cesa_ablkcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

        dreq->chain = chain;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(dreq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
                                const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

        sreq->base.type = CESA_STD_REQ;
        sreq->op = *op_templ;
        sreq->skip_ctx = false;

        return 0;
}

static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
                                       struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->nbytes, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        /* TODO: add a threshold for DMA usage */
        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

        return ret;
}

static int mv_cesa_des_op(struct ablkcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int ret;

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

        ret = mv_cesa_ablkcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ablkcipher_cleanup(req);

        return ret;
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
        .cra_name = "ecb(des)",
        .cra_driver_name = "mv-ecb-des",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES_KEY_SIZE,
                        .max_keysize = DES_KEY_SIZE,
                        .setkey = mv_cesa_des_setkey,
                        .encrypt = mv_cesa_ecb_des_encrypt,
                        .decrypt = mv_cesa_ecb_des_decrypt,
                },
        },
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
        .cra_name = "cbc(des)",
        .cra_driver_name = "mv-cbc-des",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES_KEY_SIZE,
                        .max_keysize = DES_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                        .setkey = mv_cesa_des_setkey,
                        .encrypt = mv_cesa_cbc_des_encrypt,
                        .decrypt = mv_cesa_cbc_des_decrypt,
                },
        },
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int ret;

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        ret = mv_cesa_ablkcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ablkcipher_cleanup(req);

        return ret;
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
        .cra_name = "ecb(des3_ede)",
        .cra_driver_name = "mv-ecb-des3-ede",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .setkey = mv_cesa_des3_ede_setkey,
                        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
                        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
                },
        },
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
        .cra_name = "cbc(des3_ede)",
        .cra_driver_name = "mv-cbc-des3-ede",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .setkey = mv_cesa_des3_ede_setkey,
                        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
                        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
                },
        },
};

static int mv_cesa_aes_op(struct ablkcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int ret, i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        ret = mv_cesa_ablkcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ablkcipher_cleanup(req);

        return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "mv-ecb-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = mv_cesa_aes_setkey,
                        .encrypt = mv_cesa_ecb_aes_encrypt,
                        .decrypt = mv_cesa_ecb_aes_decrypt,
                },
        },
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "mv-cbc-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = mv_cesa_ablkcipher_cra_init,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = mv_cesa_aes_setkey,
                        .encrypt = mv_cesa_cbc_aes_encrypt,
                        .decrypt = mv_cesa_cbc_aes_decrypt,
                },
        },
};