linux_dsm_epyc7002/drivers/crypto/caam/caamhash.c

/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
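/*
 * For orientation, a minimal sketch of how a caller drives the digest
 * path above through the generic crypto ahash API. This is assumed
 * caller-side code, not part of this driver; the "sha256" name, src_sg,
 * nbytes and the completion callback are illustrative, and error
 * handling is elided:
 *
 *     struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *     struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *     u8 digest[SHA256_DIGEST_SIZE];
 *
 *     ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                my_done_cb, my_ctx);
 *     ahash_request_set_crypt(req, src_sg, digest, nbytes);
 *     crypto_ahash_digest(req);      (ends up in ahash_digest() below)
 *
 *     ahash_request_free(req);
 *     crypto_free_ahash(tfm);
 */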
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY 3000
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
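/*
 * Worked example (illustrative): for sha256 the running digest is
 * SHA256_DIGEST_SIZE (32) bytes, so the per-session ctx_len is
 * HASH_MSG_LEN + 32 = 40 bytes; the 8-byte slot holds the 64-bit running
 * message length the MDHA keeps alongside the digest. MAX_CTX_LEN covers
 * the largest case, sha512: 8 + SHA512_DIGEST_SIZE (64) = 72 bytes.
 */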
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
struct device *jrdev;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
dma_addr_t sh_desc_update_dma;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
dma_addr_t sh_desc_finup_dma;
u32 alg_type;
u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
unsigned int split_key_len;
unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_1;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
int current_buf;
};
struct caam_export_state {
u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
u8 caam_ctx[MAX_CTX_LEN];
int buflen;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
};
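/*
 * A short sketch (assumed caller-side code, not part of this driver) of
 * the export/import round trip caam_export_state backs: a partial hash
 * is snapshotted from one request and resumed on another. Sizing the
 * buffer via crypto_ahash_statesize() is the illustrative choice here:
 *
 *     char st[crypto_ahash_statesize(tfm)];
 *
 *     crypto_ahash_update(req1);          (hash part of the data)
 *     crypto_ahash_export(req1, st);      (save buf/ctx and state fns)
 *     ...
 *     crypto_ahash_import(req2, st);      (restore into a fresh request)
 *     crypto_ahash_final(req2);           (finish where req1 left off)
 */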
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
struct caam_hash_state *state,
int ctx_len)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
dev_err(jrdev, "unable to map ctx\n");
return -ENOMEM;
}
append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
u8 *result, int digestsize)
{
dma_addr_t dst_dma;
dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
append_seq_out_ptr(desc, dst_dma, digestsize, 0);
return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
u8 *buf, int buflen)
{
dma_addr_t buf_dma;
buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
struct scatterlist *src, int src_nents,
struct sec4_sg_entry *sec4_sg)
{
dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Unmap the previously mapped buffer, if any, and only put the current
 * buffer in the link table if it contains data.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
u8 *buf, dma_addr_t buf_dma, int buflen,
int last_buflen)
{
if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
if (buflen)
buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
else
buf_dma = 0;
return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
dev_err(jrdev, "unable to map ctx\n");
return -ENOMEM;
}
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
return 0;
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
ctx->split_key_len, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL);
if (ctx->split_key_len) {
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
append_key_ahash(desc, ctx);
set_jump_tgt_here(desc, key_jump_cmd);
}
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash, read data from seqin following state->caam_ctx, and write the
 * resulting class2 context to seqout, which may be state->caam_ctx or
 * req->result.
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
/* Calculate remaining bytes to read */
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
/* Read remaining bytes */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
FIFOLD_TYPE_MSG | KEY_VLF);
/* Store class2 context bytes */
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
}
/*
* For ahash update, final and finup, import context, read and write to seqout
*/
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
int digestsize,
struct caam_hash_ctx *ctx)
{
init_sh_desc_key_ahash(desc, ctx);
/* Import context from software */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_2_CCB | ctx->ctx_len);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
*/
ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
int digestsize, struct caam_hash_ctx *ctx)
{
init_sh_desc_key_ahash(desc, ctx);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
*/
ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
u32 have_key = 0;
u32 *desc;
if (ctx->split_key_len)
have_key = OP_ALG_AAI_HMAC_PRECOMP;
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Import context from software */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_2_CCB | ctx->ctx_len);
/* Class 2 operation */
append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
OP_ALG_ENCRYPT);
/* Load data and write to result or context */
ahash_append_load_str(desc, ctx->ctx_len);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
ctx->ctx_len, ctx);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
OP_ALG_AS_FINALIZE, digestsize, ctx);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
/* ahash_finup shared descriptor */
desc = ctx->sh_desc_finup;
ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
OP_ALG_AS_FINALIZE, digestsize, ctx);
ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
digestsize, ctx);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
return 0;
}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
ctx->split_key_pad_len, key_in, keylen,
ctx->alg_op);
}
/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
u32 *desc;
struct split_key_result result;
dma_addr_t src_dma, dst_dma;
int ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
}
init_job_desc(desc, 0);
src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, src_dma)) {
dev_err(jrdev, "unable to map key input memory\n");
kfree(desc);
return -ENOMEM;
}
dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dst_dma)) {
dev_err(jrdev, "unable to map key output memory\n");
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
kfree(desc);
return -ENOMEM;
}
/* Job descriptor to perform unkeyed hash on key_in */
append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
append_seq_out_ptr(desc, dst_dma, digestsize, 0);
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
result.err = 0;
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"digested key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in,
digestsize, 1);
#endif
}
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
*keylen = digestsize;
kfree(desc);
return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
int ret = 0;
u8 *hashed_key = NULL;
#ifdef DEBUG
printk(KERN_ERR "keylen %d\n", keylen);
#endif
if (keylen > blocksize) {
hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
GFP_DMA);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, key, &keylen, hashed_key,
digestsize);
if (ret)
goto badkey;
key = hashed_key;
}
/* Pick class 2 key length from algorithm submask */
ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT] * 2;
ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
#ifdef DEBUG
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
ret = gen_split_hash_key(ctx, key, keylen);
if (ret)
goto badkey;
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
ret = -ENOMEM;
goto map_err;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
DMA_TO_DEVICE);
}
map_err:
kfree(hashed_key);
return ret;
badkey:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
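/*
 * Worked example (illustrative) of the split key sizing above for
 * hmac(sha256): mdpadlen[] yields an MDHA pad size of 32 bytes, so
 * split_key_len = 32 * 2 = 64 (one padded inner and one padded outer
 * HMAC state) and split_key_pad_len = ALIGN(64, 16) = 64. The largest
 * case, hmac(sha512), gives 64 * 2 = 128 bytes, which is exactly
 * CAAM_MAX_HASH_KEY_SIZE.
 */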
/*
* ahash_edesc - s/w-extended ahash descriptor
* @dst_dma: physical mapped address of req->result
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ahash_edesc {
dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[0];
};
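/*
 * The per-request allocations below depend on this layout: one kzalloc
 * of sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes puts the h/w job
 * descriptor in hw_desc[] with the S/G table directly after it (sketch;
 * sizes vary per request):
 *
 *     +-------------------------+  <- edesc
 *     | dst_dma ... sec4_sg ptr |
 *     +-------------------------+  <- edesc->hw_desc
 *     | job descriptor          |  (up to DESC_JOB_IO_LEN bytes)
 *     +-------------------------+  <- edesc->sec4_sg
 *     | sec4_sg_bytes of SGT    |
 *     +-------------------------+
 */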
static inline void ahash_unmap(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len, u32 flag)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma)
dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
struct ahash_request *req = context;
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
struct ahash_request *req = context;
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
struct ahash_request *req = context;
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
struct ahash_request *req = context;
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
#endif
req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1, last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *sh_desc = ctx->sh_desc_update, *desc;
dma_addr_t ptr = ctx->sh_desc_update_dma;
int src_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
buf, state->buf_dma,
*buflen, last_buflen);
if (src_nents) {
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + sec4_sg_src_index);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
cpu_to_caam32(SEC4_SG_LEN_FIN);
}
state->current_buf = !state->current_buf;
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
to_hash, LDST_SGF);
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
DMA_BIDIRECTIONAL);
kfree(edesc);
}
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
}
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->src_nents = 0;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
cpu_to_caam32(SEC4_SG_LEN_FIN);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_finup, *desc;
dma_addr_t ptr = ctx->sh_desc_finup_dma;
int sec4_sg_bytes, sec4_sg_src_index;
int src_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
}
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
sec4_sg_src_index);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
buflen + req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
return ret;
}
static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u32 *sh_desc = ctx->sh_desc_digest, *desc;
dma_addr_t ptr = ctx->sh_desc_digest_dma;
int digestsize = crypto_ahash_digestsize(ahash);
int src_nents, sec4_sg_bytes;
dma_addr_t src_dma;
struct ahash_edesc *edesc;
int ret = 0;
u32 options;
int sh_len;
src_nents = sg_count(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
src_dma = sg_dma_address(req->src);
options = 0;
}
append_seq_in_ptr(desc, src_dma, req->nbytes, options);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
}
return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
u32 *sh_desc = ctx->sh_desc_digest, *desc;
dma_addr_t ptr = ctx->sh_desc_digest_dma;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->sec4_sg_bytes = 0;
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map src\n");
return -ENOMEM;
}
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
return -ENOMEM;
}
edesc->src_nents = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
}
return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1;
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents;
struct ahash_edesc *edesc;
u32 *desc, *sh_desc = ctx->sh_desc_update_first;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
int ret = 0;
int sh_len;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_bytes = (1 + src_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + 1);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
}
state->current_buf = !state->current_buf;
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			kfree(edesc);	/* don't leak the extended descriptor */
			return -ENOMEM;
		}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret) {
			kfree(edesc);
			return ret;
		}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (!ret) {
ret = -EINPROGRESS;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
} else {
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
DMA_TO_DEVICE);
kfree(edesc);
}
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = 0;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_digest, *desc;
dma_addr_t ptr = ctx->sh_desc_digest_dma;
int sec4_sg_bytes, sec4_sg_src_index, src_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int sh_len;
int ret = 0;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 2;
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
}
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
last_buflen);
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* don't leak the extended descriptor */
		return -ENOMEM;
	}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		kfree(edesc);
		return -ENOMEM;
	}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
}
return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
int *next_buflen = state->current_buf ?
&state->buflen_1 : &state->buflen_0;
int to_hash;
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
int sec4_sg_bytes, src_nents;
dma_addr_t src_dma;
u32 options;
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1);
to_hash = req->nbytes - *next_buflen;
if (to_hash) {
src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev,
edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				kfree(edesc);
				return -ENOMEM;
			}
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
src_dma = sg_dma_address(req->src);
options = 0;
}
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
append_seq_in_ptr(desc, src_dma, to_hash, options);
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret) {
			kfree(edesc);
			return ret;
		}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
req);
if (!ret) {
ret = -EINPROGRESS;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
} else {
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
DMA_TO_DEVICE);
kfree(edesc);
}
} else if (*next_buflen) {
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
return ret;
}
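/*
 * finup as the very first operation after init: no running context
 * exists yet, so the request degenerates to a one-shot digest.
 */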
static int ahash_finup_first(struct ahash_request *req)
{
return ahash_digest(req);
}
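/*
 * Initialize the request state: the first update/finup go through the
 * "first" variants, final falls back to the no-ctx path, and all
 * buffer bookkeeping is cleared so no stale lengths or DMA addresses
 * are reused.
 */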
static int ahash_init(struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
state->update = ahash_update_first;
state->finup = ahash_finup_first;
state->final = ahash_final_no_ctx;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
state->buflen_1 = 0;
return 0;
}
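/*
 * Thin dispatchers: what update/finup/final must do depends on how
 * much data has already been submitted, so each call is routed
 * through the function pointers kept in the request state.
 *
 * Typical caller sequence (a sketch of the standard crypto API flow):
 *	crypto_ahash_init(req)   -> ahash_init
 *	crypto_ahash_update(req) -> state->update (ahash_update_first,
 *				    then the _ctx/_no_ctx variants)
 *	crypto_ahash_final(req)  -> state->final
 */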
static int ahash_update(struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
return state->final(req);
}
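/*
 * Export the partial hash state: the currently active buffer, the
 * CAAM running context and the state-machine function pointers.
 */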
static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
int len;
u8 *buf;
if (state->current_buf) {
buf = state->buf_1;
len = state->buflen_1;
} else {
buf = state->buf_0;
len = state->buflen_0;
}
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
export->buflen = len;
export->update = state->update;
export->final = state->final;
export->finup = state->finup;
return 0;
}
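/*
 * Import previously exported state. The buffer always lands in buf_0:
 * current_buf is zeroed along with the rest of the state.
 */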
static int ahash_import(struct ahash_request *req, const void *in)
{
struct caam_hash_state *state = ahash_request_ctx(req);
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
memcpy(state->buf_0, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
state->buflen_0 = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
return 0;
}
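/* template carrying both the unkeyed and hmac flavors of one hash */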
struct caam_hash_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
char hmac_name[CRYPTO_MAX_ALG_NAME];
char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
u32 alg_op;
};
/* ahash algorithm templates */
static struct caam_hash_template driver_hash[] = {
{
.name = "sha1",
.driver_name = "sha1-caam",
.hmac_name = "hmac(sha1)",
.hmac_driver_name = "hmac-sha1-caam",
.blocksize = SHA1_BLOCK_SIZE,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
.hmac_name = "hmac(sha224)",
.hmac_driver_name = "hmac-sha224-caam",
.blocksize = SHA224_BLOCK_SIZE,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
.hmac_name = "hmac(sha256)",
.hmac_driver_name = "hmac-sha256-caam",
.blocksize = SHA256_BLOCK_SIZE,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
.hmac_name = "hmac(sha384)",
.hmac_driver_name = "hmac-sha384-caam",
.blocksize = SHA384_BLOCK_SIZE,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
.hmac_name = "hmac(sha512)",
.hmac_driver_name = "hmac-sha512-caam",
.blocksize = SHA512_BLOCK_SIZE,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
.hmac_name = "hmac(md5)",
.hmac_driver_name = "hmac-md5-caam",
.blocksize = MD5_BLOCK_WORDS * 4,
.template_ahash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.export = ahash_export,
.import = ahash_import,
.setkey = ahash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct caam_export_state),
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
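/* a registered hash algorithm instance, linked on hash_list */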
struct caam_hash_alg {
struct list_head entry;
int alg_type;
int alg_op;
struct ahash_alg ahash_alg;
};
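/*
 * Per-tfm setup: take a job ring for in-order processing, derive the
 * descriptor header templates and running-digest length for the
 * chosen algorithm, then build the shared descriptors.
 */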
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct crypto_alg *base = tfm->__crt_alg;
struct hash_alg_common *halg =
container_of(base, struct hash_alg_common, base);
struct ahash_alg *alg =
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32, /* SHA-224 runs as SHA-256 */
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64, /* SHA-384 runs as SHA-512 */
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
int ret = 0;
/*
* Get a Job ring from Job Ring driver to ensure in-order
* crypto request processing per tfm
*/
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
ret = ahash_set_sh_desc(ahash);
return ret;
}
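/*
 * Per-tfm teardown: unmap whichever shared descriptors were mapped
 * and release the job ring.
 */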
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->sh_desc_update_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
desc_bytes(ctx->sh_desc_update),
DMA_TO_DEVICE);
if (ctx->sh_desc_update_first_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(ctx->sh_desc_update_first),
DMA_TO_DEVICE);
if (ctx->sh_desc_fin_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
if (ctx->sh_desc_digest_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
if (ctx->sh_desc_finup_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
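/*
 * Module exit: unregister and free every algorithm on hash_list. The
 * hash_list.next check covers the case where init never ran.
 */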
static void __exit caam_algapi_hash_exit(void)
{
struct caam_hash_alg *t_alg, *n;
if (!hash_list.next)
return;
list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}
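/*
 * Build a caam_hash_alg from a template, using the hmac names for the
 * keyed flavor; the unkeyed flavor drops setkey entirely.
 */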
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
bool keyed)
{
struct caam_hash_alg *t_alg;
struct ahash_alg *halg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
t_alg->ahash_alg = template->template_ahash;
halg = &t_alg->ahash_alg;
alg = &halg->halg.base;
if (keyed) {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
t_alg->alg_op = template->alg_op;
return t_alg;
}
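/*
 * Module init: find the CAAM controller, bail out if no MD block is
 * instantiated, cap the digest size on LP256 parts, then register the
 * hmac and unkeyed variants of each hash the hardware supports.
 */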
static int __init caam_algapi_hash_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
int i = 0, err = 0;
struct caam_drv_private *priv;
unsigned int md_limit = SHA512_DIGEST_SIZE;
u32 cha_inst, cha_vid;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
if (!dev_node)
return -ENODEV;
}
pdev = of_find_device_by_node(dev_node);
if (!pdev) {
of_node_put(dev_node);
return -ENODEV;
}
ctrldev = &pdev->dev;
priv = dev_get_drvdata(ctrldev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
if (!priv)
return -ENODEV;
/*
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
*/
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
/*
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
return -ENODEV;
/* Limit digest size based on LP256 */
if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
struct caam_hash_alg *t_alg;
struct caam_hash_template *alg = driver_hash + i;
/* If MD size is not supported by device, skip registration */
if (alg->template_ahash.halg.digestsize > md_limit)
continue;
/* register hmac version */
t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
t_alg->ahash_alg.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
t_alg->ahash_alg.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
list_add_tail(&t_alg->entry, &hash_list);
}
return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");