// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */
|
|
|
|
|
2008-07-07 21:19:53 +07:00
|
|
|
#include <crypto/internal/hash.h>
|
|
|
|
#include <crypto/scatterwalk.h>
|
2014-05-21 19:56:12 +07:00
|
|
|
#include <linux/bug.h>
|
2008-05-14 19:41:47 +07:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/seq_file.h>
|
2011-09-27 12:41:07 +07:00
|
|
|
#include <linux/cryptouser.h>
|
2016-12-31 22:56:23 +07:00
|
|
|
#include <linux/compiler.h>
|
2011-09-27 12:41:07 +07:00
|
|
|
#include <net/netlink.h>
|
2008-05-14 19:41:47 +07:00
|
|
|
|
|
|
|
#include "internal.h"
|
|
|
|
|
2009-07-15 11:40:40 +07:00
|
|
|
/*
 * Per-request backup of the caller's request state, allocated when the
 * caller's result buffer is misaligned and the request must be adjusted
 * (see ahash_save_req()/ahash_restore_req()).
 */
struct ahash_request_priv {
	/* Original completion callback from the caller's request. */
	crypto_completion_t complete;
	/* Original callback context pointer. */
	void *data;
	/* Original (misaligned) result buffer to copy the digest back into. */
	u8 *result;
	/* Original request flags, restored on completion. */
	u32 flags;
	/* Trailing aligned scratch buffer used as the temporary result. */
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
|
|
|
|
|
2009-07-14 11:28:26 +07:00
|
|
|
/* Map a crypto_ahash handle to its containing ahash_alg definition. */
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}
|
|
|
|
|
2008-07-07 21:19:53 +07:00
|
|
|
/*
 * Map the current page of the walk and return the number of bytes that
 * may be hashed from it in one step.
 *
 * The step is clamped to the end of the page and, when the starting
 * offset is misaligned, additionally clamped so the next step begins on
 * an alignmask boundary.  walk->entrylen is decremented by the amount
 * handed out.
 */
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	/*
	 * Async walks may sleep while the mapping is held, so they must
	 * use kmap() rather than the atomic variant.
	 */
	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		/* Only hand out bytes up to the next aligned boundary. */
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}
|
|
|
|
|
|
|
|
/*
 * Begin walking the current scatterlist entry: locate its page and
 * in-page offset, clamp the entry length to the remaining total, and
 * map the first chunk via hash_walk_next().
 */
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	/* sg->offset may span pages; normalise to page + in-page offset. */
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}
|
|
|
|
|
|
|
|
/*
 * Advance the hash walk after the caller has processed the chunk
 * returned by the previous step.
 *
 * Returns the size of the next chunk to process, 0 when the walk is
 * complete, or a negative error code (@err is propagated unchanged).
 */
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	/*
	 * If the previous step stopped at an unaligned offset inside the
	 * current page (see hash_walk_next()), continue within the same
	 * mapping from the realigned offset before unmapping anything.
	 */
	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	/* Drop the mapping taken in hash_walk_next(). */
	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	/* More data left in the current scatterlist entry: next page. */
	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	/* Current entry exhausted; move on to the next scatterlist entry. */
	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
|
|
|
|
|
|
|
|
/*
 * Start a synchronous hash walk over @req's source scatterlist.
 * Returns the size of the first chunk to hash, or 0 for an empty
 * request.
 */
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	/* Only request flags are carried; CRYPTO_ALG_ASYNC stays clear. */
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
|
|
|
|
|
2014-05-21 19:56:12 +07:00
|
|
|
/*
 * Start an asynchronous hash walk over @req's source scatterlist.
 * Identical to crypto_hash_walk_first() except that CRYPTO_ALG_ASYNC is
 * set in walk->flags, which makes the walk use sleepable kmap()
 * mappings (see hash_walk_next()).
 */
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	/* The async bit must not collide with the request-flag space. */
	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
|
|
|
|
|
2008-05-14 19:41:47 +07:00
|
|
|
/*
 * Set the key when the caller's key buffer does not satisfy the
 * algorithm's alignment mask: copy it into a freshly allocated aligned
 * buffer, call ->setkey() on that, then scrub and free the copy.
 */
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	/* Over-allocate by alignmask so an aligned start always fits. */
	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	/* kzfree() zeroizes the key copy before freeing it. */
	kzfree(buffer);
	return ret;
}
|
|
|
|
|
2019-01-07 09:47:42 +07:00
|
|
|
/* Default ->setkey() for algorithms that do not take a key. */
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}
|
|
|
|
|
|
|
|
/*
 * Mark @tfm as still needing a key, unless the algorithm is keyless
 * (uses the default ahash_nosetkey) or declares its key optional.
 * While CRYPTO_TFM_NEED_KEY is set, init/import/digest refuse to run.
 */
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
|
|
|
|
|
2009-07-15 11:40:40 +07:00
|
|
|
/*
 * Set the key for @tfm, bouncing through an aligned copy if the
 * caller's buffer violates the algorithm's alignment mask.
 *
 * On success the CRYPTO_TFM_NEED_KEY flag is cleared so the transform
 * may be used; on failure the flag is (re)set so that subsequent
 * init/import/digest calls fail with -ENOKEY instead of running with a
 * half-configured key.
 */
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
|
2008-05-14 19:41:47 +07:00
|
|
|
|
2009-07-15 11:40:40 +07:00
|
|
|
/*
 * Size needed for a buffer of @len bytes that must be alignable to
 * @mask + 1.  Slack beyond the context alignment kmalloc already
 * guarantees is excluded, since PTR_ALIGN() never has to skip that far.
 */
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
|
|
|
|
|
2014-03-14 08:37:05 +07:00
|
|
|
/*
 * Save the caller-visible parts of @req (result pointer, completion
 * callback, callback data, flags) into a freshly allocated
 * ahash_request_priv, then redirect the request at an aligned scratch
 * result buffer and the internal completion @cplt.
 * Undone by ahash_restore_req().  Returns 0 or -ENOMEM.
 */
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on it's content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}
|
|
|
|
|
2017-04-10 16:27:57 +07:00
|
|
|
/*
 * Undo ahash_save_req(): on success copy the digest from the aligned
 * scratch buffer back into the caller's original result buffer, then
 * restore the caller's result pointer, callback and flags, and free
 * (with zeroization) the private backup.
 */
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}
|
|
|
|
|
2017-04-10 16:27:57 +07:00
|
|
|
/*
 * Forward an -EINPROGRESS notification to the ORIGINAL completion
 * callback saved in req->priv, using a stack-local async request that
 * carries the original callback data.
 */
static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}
|
|
|
|
|
|
|
|
/*
 * Completion handler for the ADJUSTED request created by
 * ahash_op_unaligned(): restore the original request (copying out the
 * digest) and then invoke the original completion callback.
 */
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/* -EINPROGRESS is only a notification; keep the adjusted state. */
	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req" .
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}
|
|
|
|
|
|
|
|
/*
 * Run @op on a request whose result buffer is misaligned: redirect the
 * request at an aligned scratch buffer (ahash_save_req()), invoke @op,
 * and restore the request unless the operation is still in flight.
 */
static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	/* Async path: ahash_op_unaligned_done() will restore the request. */
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}
|
|
|
|
|
|
|
|
static int crypto_ahash_op(struct ahash_request *req,
|
|
|
|
int (*op)(struct ahash_request *))
|
|
|
|
{
|
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
unsigned long alignmask = crypto_ahash_alignmask(tfm);
|
|
|
|
|
|
|
|
if ((unsigned long)req->result & alignmask)
|
|
|
|
return ahash_op_unaligned(req, op);
|
|
|
|
|
|
|
|
return op(req);
|
|
|
|
}
|
|
|
|
|
|
|
|
int crypto_ahash_final(struct ahash_request *req)
|
|
|
|
{
|
2018-11-29 21:42:21 +07:00
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct crypto_alg *alg = tfm->base.__crt_alg;
|
|
|
|
unsigned int nbytes = req->nbytes;
|
2018-09-19 17:10:54 +07:00
|
|
|
int ret;
|
|
|
|
|
2018-11-29 21:42:21 +07:00
|
|
|
crypto_stats_get(alg);
|
2018-09-19 17:10:54 +07:00
|
|
|
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
|
2018-11-29 21:42:21 +07:00
|
|
|
crypto_stats_ahash_final(nbytes, ret, alg);
|
2018-09-19 17:10:54 +07:00
|
|
|
return ret;
|
2009-07-15 11:40:40 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_ahash_final);
|
|
|
|
|
|
|
|
int crypto_ahash_finup(struct ahash_request *req)
|
|
|
|
{
|
2018-11-29 21:42:21 +07:00
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct crypto_alg *alg = tfm->base.__crt_alg;
|
|
|
|
unsigned int nbytes = req->nbytes;
|
2018-09-19 17:10:54 +07:00
|
|
|
int ret;
|
|
|
|
|
2018-11-29 21:42:21 +07:00
|
|
|
crypto_stats_get(alg);
|
2018-09-19 17:10:54 +07:00
|
|
|
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
|
2018-11-29 21:42:21 +07:00
|
|
|
crypto_stats_ahash_final(nbytes, ret, alg);
|
2018-09-19 17:10:54 +07:00
|
|
|
return ret;
|
2009-07-15 11:40:40 +07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
|
|
|
|
|
|
|
|
/*
 * One-shot hash of @req's data.  Refuses with -ENOKEY if the transform
 * is keyed but no key has been set (CRYPTO_TFM_NEED_KEY), preventing
 * unkeyed use of keyed algorithms.  Outcome is recorded in the crypto
 * statistics.
 */
int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
|
|
|
|
|
2017-04-10 16:27:57 +07:00
|
|
|
/*
 * Completion of the ->final() stage of the default finup: restore the
 * original request and complete it.  A bare -EINPROGRESS notification
 * is ignored here (the request stays adjusted until real completion).
 */
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}
|
|
|
|
|
|
|
|
/*
 * Second half of the default finup: after ->update() has finished with
 * status @err, chain into ->final().  If ->final() goes asynchronous,
 * ahash_def_finup_done2 takes over; otherwise restore the request here.
 */
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	/* Re-aim the adjusted request's completion at the final stage. */
	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Completion of the ->update() stage of the default finup.  Forwards
 * -EINPROGRESS notifications, then drives the ->final() stage via
 * ahash_def_finup_finish1(); completes the original request only once
 * the private state has been torn down (areq->priv == NULL).
 */
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/* We are in softirq/callback context now: must not sleep. */
	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	/* priv still set means ->final() went async; done2 will finish. */
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}
|
|
|
|
|
|
|
|
/*
 * Default ->finup() for algorithms that only implement ->update() and
 * ->final().  The original request is saved with ahash_save_req() so it
 * can be restored (and any bounce result buffer copied back) once both
 * steps have run; ahash_def_finup_done1 continues the sequence when the
 * backend completes asynchronously.
 */
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	/* Stash the caller's request; done1 restores it on async completion. */
	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	/* Async backends return -EINPROGRESS/-EBUSY; the callback finishes. */
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	/* Synchronous path: run the final step (and restore) inline. */
	return ahash_def_finup_finish1(req, err);
}
|
|
|
|
|
2009-07-14 11:28:26 +07:00
|
|
|
/*
 * Transform constructor: copy the algorithm's operation pointers into the
 * per-tfm struct crypto_ahash.  Algorithms registered through the shash
 * interface are instead routed through the async-shash wrapper.
 */
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	/* Default: reject setkey until we know the algorithm has one. */
	hash->setkey = ahash_nosetkey;

	/* Not a native ahash — wrap the underlying shash instead. */
	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;	/* finup is optional */
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		/* Keyed hashes must be given a key before first use. */
		ahash_set_needkey(hash);
	}

	return 0;
}
|
|
|
|
|
|
|
|
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
|
|
|
|
{
|
2016-06-29 17:03:47 +07:00
|
|
|
if (alg->cra_type != &crypto_ahash_type)
|
|
|
|
return sizeof(struct crypto_shash *);
|
2009-07-14 11:28:26 +07:00
|
|
|
|
2016-06-29 17:03:47 +07:00
|
|
|
return crypto_alg_extsize(alg);
|
2009-07-14 11:28:26 +07:00
|
|
|
}
|
|
|
|
|
2011-11-03 19:46:07 +07:00
|
|
|
#ifdef CONFIG_NET
/*
 * Report this algorithm's ahash parameters to userspace over the crypto
 * netlink interface (crypto_user).
 */
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	/* Zero the whole struct so no kernel stack leaks into the message. */
	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
/* Stub when netlink reporting is compiled out. */
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
|
2011-09-27 12:41:07 +07:00
|
|
|
|
2008-05-14 19:41:47 +07:00
|
|
|
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
|
2016-12-31 22:56:23 +07:00
|
|
|
__maybe_unused;
|
2008-05-14 19:41:47 +07:00
|
|
|
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
|
|
|
|
{
|
|
|
|
seq_printf(m, "type : ahash\n");
|
|
|
|
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
|
|
|
"yes" : "no");
|
|
|
|
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
2009-07-14 11:28:26 +07:00
|
|
|
seq_printf(m, "digestsize : %u\n",
|
|
|
|
__crypto_hash_alg_common(alg)->digestsize);
|
2008-05-14 19:41:47 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Type glue binding the ahash frontend into the generic crypto core. */
const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
|
|
|
|
|
2009-07-14 11:28:26 +07:00
|
|
|
/*
 * Allocate an ahash transform by algorithm name.  Returns an ERR_PTR on
 * failure; release with crypto_free_ahash().
 */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
|
|
|
|
|
2016-01-23 12:52:40 +07:00
|
|
|
/* Test whether an ahash algorithm of the given name/type/mask exists. */
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
|
|
|
|
|
2009-07-14 13:06:06 +07:00
|
|
|
static int ahash_prepare_alg(struct ahash_alg *alg)
|
|
|
|
{
|
|
|
|
struct crypto_alg *base = &alg->halg.base;
|
|
|
|
|
2018-08-08 04:18:38 +07:00
|
|
|
if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
|
|
|
|
alg->halg.statesize > HASH_MAX_STATESIZE ||
|
2015-10-10 02:43:33 +07:00
|
|
|
alg->halg.statesize == 0)
|
2009-07-14 13:06:06 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
base->cra_type = &crypto_ahash_type;
|
|
|
|
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
|
|
|
|
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int crypto_register_ahash(struct ahash_alg *alg)
|
|
|
|
{
|
|
|
|
struct crypto_alg *base = &alg->halg.base;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = ahash_prepare_alg(alg);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
return crypto_register_alg(base);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_register_ahash);
|
|
|
|
|
|
|
|
/* Unregister an ahash algorithm previously added with crypto_register_ahash(). */
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
|
|
|
|
|
2017-08-10 19:53:52 +07:00
|
|
|
int crypto_register_ahashes(struct ahash_alg *algs, int count)
|
|
|
|
{
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
ret = crypto_register_ahash(&algs[i]);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
for (--i; i >= 0; --i)
|
|
|
|
crypto_unregister_ahash(&algs[i]);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_register_ahashes);
|
|
|
|
|
|
|
|
void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = count - 1; i >= 0; --i)
|
|
|
|
crypto_unregister_ahash(&algs[i]);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
|
|
|
|
|
2009-07-14 13:06:06 +07:00
|
|
|
int ahash_register_instance(struct crypto_template *tmpl,
|
|
|
|
struct ahash_instance *inst)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = ahash_prepare_alg(&inst->alg);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(ahash_register_instance);
|
|
|
|
|
|
|
|
/*
 * Destructor for template instances: release the held spawn first,
 * then free the instance memory itself.
 */
void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);
|
|
|
|
|
|
|
|
/*
 * Initialise a spawn (a template's reference to an underlying hash
 * algorithm), pinning it to the ahash frontend type.
 */
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
|
|
|
|
|
|
|
|
/*
 * Resolve a hash algorithm from a template's rtattr parameter.  Returns
 * the common hash part of the algorithm, or an ERR_PTR from the lookup.
 */
struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	return __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
|
|
|
|
|
2018-01-04 02:16:22 +07:00
|
|
|
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
|
|
|
|
{
|
|
|
|
struct crypto_alg *alg = &halg->base;
|
|
|
|
|
|
|
|
if (alg->cra_type != &crypto_ahash_type)
|
|
|
|
return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
|
|
|
|
|
|
|
|
return __crypto_ahash_alg(alg)->setkey != NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
|
|
|
|
|
2008-05-14 19:41:47 +07:00
|
|
|
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");