2019-05-27 13:55:01 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2007-04-16 17:48:54 +07:00
|
|
|
/*
|
|
|
|
* Asynchronous block chaining cipher operations.
|
2010-02-16 19:23:37 +07:00
|
|
|
*
|
2007-04-16 17:48:54 +07:00
|
|
|
* This is the asynchronous version of blkcipher.c indicating completion
|
|
|
|
* via a callback.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
|
|
|
*/
|
|
|
|
|
2007-12-17 19:07:31 +07:00
|
|
|
#include <crypto/internal/skcipher.h>
|
|
|
|
#include <linux/err.h>
|
2007-08-23 15:23:01 +07:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/slab.h>
|
2007-04-16 17:48:54 +07:00
|
|
|
#include <linux/seq_file.h>
|
2011-09-27 12:42:32 +07:00
|
|
|
#include <linux/cryptouser.h>
|
2016-12-31 22:56:23 +07:00
|
|
|
#include <linux/compiler.h>
|
2011-09-27 12:42:32 +07:00
|
|
|
#include <net/netlink.h>
|
2007-04-16 17:48:54 +07:00
|
|
|
|
2010-05-19 11:13:07 +07:00
|
|
|
#include <crypto/scatterwalk.h>
|
|
|
|
|
2007-12-17 19:07:31 +07:00
|
|
|
#include "internal.h"
|
|
|
|
|
2010-05-19 11:13:07 +07:00
|
|
|
/*
 * Bookkeeping for the "slow" walk path: data is processed in a bounce
 * buffer and must be copied back to the destination scatterlist when
 * the walk completes.
 */
struct ablkcipher_buffer {
	struct list_head entry;	/* link in ablkcipher_walk->buffers */
	struct scatter_walk dst;	/* output position to copy back to */
	unsigned int len;	/* number of bytes in @data */
	void *data;	/* bounce buffer holding processed data */
};
|
|
|
|
|
|
|
|
enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,	/* current step uses a bounce buffer */
};
|
|
|
|
|
|
|
|
/* Flush one queued bounce buffer out to its destination scatterlist. */
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
|
|
|
|
|
|
|
|
/*
 * Complete an ablkcipher walk: copy every pending bounce buffer back to
 * its destination scatterlist and free the buffers.  Called once the
 * asynchronous operation has finished with the data.
 */
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	/* _safe variant: entries are deleted while iterating. */
	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
|
|
|
|
|
|
|
|
/*
 * Queue a bounce buffer for later write-back.  The current output walk
 * position is captured so __ablkcipher_walk_complete() knows where the
 * data eventually belongs.
 */
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
|
|
|
|
|
|
|
|
/* Get a spot of the specified length that does not straddle a page.
|
|
|
|
* The caller needs to ensure that there is enough space for this operation.
|
|
|
|
*/
|
|
|
|
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
|
|
|
|
{
|
|
|
|
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
|
2014-12-05 12:06:16 +07:00
|
|
|
|
2010-05-19 11:13:07 +07:00
|
|
|
return max(start, end_page);
|
|
|
|
}
|
|
|
|
|
2018-07-24 00:54:58 +07:00
|
|
|
/*
 * Advance the output walk past @n bytes that were processed through a
 * bounce buffer, crossing scatterlist entries as necessary.
 */
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		/*
		 * NOTE(review): advancing by the full residue @n (rather
		 * than len_this_page) overshoots the current entry, but
		 * appears harmless: when the loop continues,
		 * scatterwalk_start() below resets the position to the
		 * start of the next sg entry.
		 */
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}
|
|
|
|
|
2018-07-24 00:54:58 +07:00
|
|
|
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
|
|
|
|
unsigned int n)
|
2010-05-19 11:13:07 +07:00
|
|
|
{
|
|
|
|
scatterwalk_advance(&walk->in, n);
|
|
|
|
scatterwalk_advance(&walk->out, n);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ablkcipher_walk_next(struct ablkcipher_request *req,
|
|
|
|
struct ablkcipher_walk *walk);
|
|
|
|
|
|
|
|
/*
 * Finish one step of an ablkcipher walk.
 *
 * @err: number of bytes the cipher left unprocessed (>= 0), or a
 *       negative error code.
 *
 * On success the walk is advanced past the processed bytes; if data
 * remains, the next step is started via ablkcipher_walk_next().  On
 * final completion (or error) the IV — possibly realigned into a
 * bounce buffer — is copied back into the request and the IV buffer
 * is freed.  Returns 0, the result of the next step, or a negative
 * error code.
 */
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	/* If the IV was bounced for alignment, propagate the final value. */
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
|
|
|
|
|
|
|
|
/*
 * Slow path for the next walk step: the chunk is misaligned or would
 * straddle a page, so allocate a bounce buffer, copy the source data
 * into it, and queue it for write-back to the destination when the
 * walk completes.  On success *src_p/*dst_p point into the bounce
 * buffer (src == dst: the transform is done in place); on allocation
 * failure the result of ablkcipher_walk_done(..., -ENOMEM) is
 * returned.
 */
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	/* Header plus worst-case room for an aligned, non-page-straddling
	 * data area.
	 */
	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;	/* data area starts right after the header */

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	/* In-place transform in the bounce buffer: stage the input now. */
	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Bounce a misaligned IV into a freshly allocated, suitably aligned
 * spot that does not straddle a page.  On success walk->iv points at
 * the aligned copy and walk->iv_buffer owns the allocation (freed in
 * ablkcipher_walk_done()).  Returns 0 or -ENOMEM.
 */
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	/* Worst-case room: two block-sized spots plus the IV spot itself. */
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* Skip past two block-aligned spots, then place the IV in a
	 * non-page-straddling spot of its own.
	 */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
|
|
|
|
|
|
|
|
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
|
|
|
|
struct ablkcipher_walk *walk)
|
|
|
|
{
|
|
|
|
walk->src.page = scatterwalk_page(&walk->in);
|
|
|
|
walk->src.offset = offset_in_page(walk->in.offset);
|
|
|
|
walk->dst.page = scatterwalk_page(&walk->out);
|
|
|
|
walk->dst.offset = offset_in_page(walk->out.offset);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set up the next chunk of the walk.  Uses the fast path (direct page
 * access) when the current chunk is aligned and page-contained;
 * otherwise falls back to a bounce buffer via ablkcipher_next_slow(),
 * in which case the bounce buffer's physical page/offset are recorded
 * in walk->src/walk->dst.
 */
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		/* Less than one block left: cannot continue. */
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	/* Clamp to what is contiguous in both input and output. */
	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	/* Slow path only: translate the bounce buffer to page/offset. */
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Begin an ablkcipher walk: record the IV (bouncing it into an aligned
 * buffer if needed) and start both scatterlist walks.
 */
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	/*
	 * Set walk->iv before the zero-length check: some constructions
	 * (e.g. AEAD modes) legitimately process zero-length plaintexts
	 * and still require the IV to be in place for their setup code.
	 */
	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}
|
|
|
|
|
|
|
|
int ablkcipher_walk_phys(struct ablkcipher_request *req,
|
|
|
|
struct ablkcipher_walk *walk)
|
|
|
|
{
|
|
|
|
walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
|
|
|
|
return ablkcipher_walk_first(req, walk);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
|
|
|
|
|
2007-08-23 15:23:01 +07:00
|
|
|
/*
 * Set the key through a temporary aligned copy when the caller's key
 * buffer does not satisfy the algorithm's alignment mask.  The copy is
 * zeroed before being freed so key material does not linger in freed
 * memory.
 */
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	/* Extra alignmask bytes allow aligning within the allocation. */
	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* Wipe the key copy before releasing the buffer. */
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
|
|
|
|
|
2007-04-16 17:48:54 +07:00
|
|
|
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
|
|
|
|
unsigned int keylen)
|
|
|
|
{
|
|
|
|
struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
|
2007-05-19 16:51:21 +07:00
|
|
|
unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
|
2007-04-16 17:48:54 +07:00
|
|
|
|
|
|
|
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
|
|
|
|
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2007-05-19 16:51:21 +07:00
|
|
|
if ((unsigned long)key & alignmask)
|
|
|
|
return setkey_unaligned(tfm, key, keylen);
|
|
|
|
|
2007-04-16 17:48:54 +07:00
|
|
|
return cipher->setkey(tfm, key, keylen);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Per-transform context size for an ablkcipher; @type and @mask are
 * accepted for the crypto_type interface but not used here.
 */
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}
|
|
|
|
|
|
|
|
/*
 * Wire up the transform's ablkcipher ops from the algorithm
 * description.  Rejects implausibly large IV sizes as a sanity check.
 */
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	/* Sanity bound on the IV size. */
	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
|
|
|
|
|
2011-11-03 19:46:07 +07:00
|
|
|
#ifdef CONFIG_NET
/*
 * Report this algorithm's blkcipher parameters over the crypto_user
 * netlink interface.
 */
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	/* Zero the whole struct so no stack bytes leak to userspace. */
	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
/* Stub when netlink reporting is compiled out. */
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
|
2011-09-27 12:42:32 +07:00
|
|
|
|
2007-04-16 17:48:54 +07:00
|
|
|
/* Display this algorithm's parameters in /proc/crypto. */
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type : ablkcipher\n");
	seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv : <default>\n");
}
|
|
|
|
|
|
|
|
/*
 * Type object registering ablkcipher transforms with the crypto core:
 * context sizing, op initialization, /proc display and netlink report.
 */
const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
|