mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
ec8f5d8f6f
The driver is separated into functional parts:

- The core part implements the platform driver probe and remove callbacks. The probe enables clocks, checks the crypto version, initializes and requests DMA channels, creates the done tasklet, initializes the crypto queue and finally registers the algorithms with the crypto core subsystem.
- DMA and SG helper functions implement the dmaengine and sg-list helpers used by the other parts of the crypto driver.
- ablkcipher algorithms: implementation of the AES, DES and 3DES crypto API callbacks, the crypto register alg function, the async request handler and its DMA done callback function.
- SHA and HMAC transforms: implementation and registration of the ahash crypto type. It includes sha1, sha256, hmac(sha1) and hmac(sha256).
- Infrastructure to set up the crypto hardware: functions used to set up/prepare hardware registers for all algorithms supported by the crypto block. It also exports a few helper functions needed by the algorithms:
  - to check hardware status
  - to start the crypto hardware
  - to translate the data stream to big-endian form

Also adds the register addresses and bit/mask definitions used by the driver.

Signed-off-by: Stanimir Varbanov <svarbanov@mm-sol.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
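Not part of the mirrored file: a minimal sketch, assuming a kernel of the same era (before the ablkcipher API was removed), of how an in-kernel caller would reach one of the transforms this driver registers, for example "cbc(aes)". All qce_demo_* names are hypothetical and error handling is trimmed.

/* Hypothetical user of the legacy ablkcipher API; the crypto core selects
 * the "cbc-aes-qce" implementation when it has the highest priority for
 * "cbc(aes)". */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <crypto/aes.h>

static void qce_demo_done(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)
		return;		/* request was backlogged, now started */
	complete(req->data);	/* wake the submitter */
}

static int qce_demo_encrypt(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					qce_demo_done, &done);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* encryption completes asynchronously via qce_demo_done() */
	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return ret;
}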
432 lines
11 KiB
C
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

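/*
 * DMA completion callback: terminate any outstanding DMA transfers, unmap
 * the source and destination scatterlists, release the destination sg table
 * built in the request handler, read back the hardware status and report
 * the result to the core through async_req_done().
 */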
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
			    rctx->dst_chained);
	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
		    rctx->dst_chained);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}

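/*
 * Called by the qce core to start processing one request: count and map the
 * source/destination scatterlists, build a destination sg table with the
 * result buffer appended, prepare the dmaengine descriptors and program the
 * crypto block via qce_start(). Completion is reported asynchronously by
 * qce_ablkcipher_done().
 */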
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	if (diff_dst) {
		rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
					      &rctx->dst_chained);
	} else {
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_chained = rctx->src_chained;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
			rctx->dst_chained);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
				rctx->src_chained);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
			    rctx->src_chained);
error_unmap_dst:
	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
		    rctx->dst_chained);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

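/*
 * setkey: AES keys other than 128 and 256 bits (i.e. AES-192) are not
 * handled by the hardware and are forwarded to the software fallback tfm;
 * weak DES keys are rejected when the transform requests it.
 */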
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	if (IS_AES(flags)) {
		switch (keylen) {
		case AES_KEYSIZE_128:
		case AES_KEYSIZE_256:
			break;
		default:
			goto fallback;
		}
	} else if (IS_DES(flags)) {
		u32 tmp[DES_EXPKEY_WORDS];

		ret = des_ekey(tmp, key);
		if (!ret && crypto_ablkcipher_get_flags(ablk) &
		    CRYPTO_TFM_REQ_WEAK_KEY)
			goto weakkey;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
weakkey:
	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
	return -EINVAL;
}

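/*
 * Common encrypt/decrypt path: requests using an AES key length the hardware
 * cannot handle are redirected to the fallback tfm; everything else is
 * enqueued to the qce core for asynchronous processing.
 */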
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}

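/*
 * Transform init: set the request context size and allocate the software
 * fallback transform used when the hardware cannot handle the requested
 * AES key size.
 */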
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
						CRYPTO_ALG_TYPE_ABLKCIPHER,
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}

static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->fallback);
}

struct qce_ablkcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

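/*
 * Block cipher modes exposed by this driver: AES in ECB, CBC, CTR and XTS
 * mode, plus DES and 3DES in ECB and CBC mode.
 */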
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};

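/*
 * Allocate a qce_alg_template for one table entry, fill in the crypto_alg
 * callbacks and register it with the crypto core; successfully registered
 * templates are kept on ablkcipher_algs for later unregistration.
 */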
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;
	INIT_LIST_HEAD(&alg->cra_list);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}

static void qce_ablkcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
		crypto_unregister_alg(&tmpl->alg.crypto);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ablkcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ablkcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};