/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t cc_sector;
	atomic_t cc_pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
};

/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS 16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

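/*
 * ESSIV: the IV for a sector is the little-endian sector number encrypted
 * with a cipher keyed by a hash of the volume key (the salt computed in
 * crypt_iv_essiv_init() above).
 */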
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

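/*
 * benbi: big-endian count of cipher blocks, starting at 1.  For example,
 * with a 16-byte block cipher a 512-byte sector holds 32 blocks, so
 * shift == 5 and sector n yields the count (n << 5) + 1, stored in the
 * last 8 bytes of the IV.
 */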
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

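/*
 * Compute the LMK IV for one 512-byte sector: MD5 over the optional seed,
 * the data of cipher blocks 1-31 (the first 16-byte block is skipped) and
 * the sector number cropped to 56 bits, then take the raw exported MD5
 * state without final padding.
 */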
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

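/*
 * The IV is stored directly behind struct dm_crypt_request, rounded up to
 * the cipher's alignment mask (see the request layout described in
 * struct crypt_config).
 */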
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

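/*
 * Encrypt or decrypt a single 512-byte sector: map the current input and
 * output bio_vec positions to one-entry scatterlists, generate the IV and
 * hand the request to the crypto layer.
 */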
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

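/*
 * Set up the per-CPU request for this sector.  With a multikey mapping
 * (cipher:keycount) consecutive sectors rotate through the tfms array;
 * tfms_count is a power of two, so the mask below is a cheap modulo.
 */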
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

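/*
 * Allocate per-bio state from the io mempool; io_pending starts at zero and
 * each processing stage takes a reference via crypt_inc_pending().
 */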
static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

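/*
 * Read path, step 1: submit a clone of the bio to the underlying device;
 * crypt_endio() queues the decryption work once the data is in.  Returns
 * nonzero if the clone could not be allocated.
 */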
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

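/*
 * Encrypt a write bio.  The output buffer may be smaller than the input,
 * so the bio is processed in fragments; with async crypto each fragment
 * gets its own dm_crypt_io, chained to the first one via base_io.
 */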
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

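/*
 * Completion callback for async cipher requests.  An -EINPROGRESS
 * completion only reports that a previously backlogged (-EBUSY) request
 * was accepted, so it merely restarts crypt_convert(), which is waiting
 * in wait_for_completion(); real completions post-process the IV and
 * drop the pending references.
 */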
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

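/*
 * Program the key into every cipher context.  For multikey mappings
 * (keycount > 1) the volume key is split into tfms_count equal subkeys,
 * one per context; with a single context the whole key is used.
 */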
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int err = 0, i, r;

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
		}

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
|
|
|
|
if (!cc->cipher)
|
|
|
|
goto bad_mem;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
chainmode = strsep(&tmp, "-");
|
|
|
|
ivopts = strsep(&tmp, "-");
|
|
|
|
ivmode = strsep(&ivopts, ":");
|
|
|
|
|
|
|
|
if (tmp)
|
2010-08-12 10:14:07 +07:00
|
|
|
DMWARN("Ignoring unexpected additional cipher options");
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-07-27 21:08:05 +07:00
|
|
|
cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
|
2011-01-14 02:59:54 +07:00
|
|
|
__alignof__(struct crypt_cpu));
|
2011-01-14 02:59:53 +07:00
|
|
|
if (!cc->cpu) {
|
|
|
|
ti->error = "Cannot allocate per cpu state";
|
|
|
|
goto bad_mem;
|
|
|
|
}
|
|
|
|
|
2011-01-14 02:59:52 +07:00
|
|
|
/*
|
|
|
|
* For compatibility with the original dm-crypt mapping format, if
|
|
|
|
* only the cipher name is supplied, use cbc-plain.
|
|
|
|
*/
|
2010-08-12 10:14:07 +07:00
|
|
|
if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
chainmode = "cbc";
|
|
|
|
ivmode = "plain";
|
|
|
|
}
|
|
|
|
|
2006-08-22 17:29:17 +07:00
|
|
|
if (strcmp(chainmode, "ecb") && !ivmode) {
|
2010-08-12 10:14:07 +07:00
|
|
|
ti->error = "IV mechanism required";
|
|
|
|
return -EINVAL;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:07 +07:00
|
|
|
cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
|
|
|
|
if (!cipher_api)
|
|
|
|
goto bad_mem;
|
|
|
|
|
|
|
|
ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
|
|
|
|
"%s(%s)", chainmode, cipher);
|
|
|
|
if (ret < 0) {
|
|
|
|
kfree(cipher_api);
|
|
|
|
goto bad_mem;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:07 +07:00
|
|
|
/* Allocate cipher */
|
2012-07-27 21:08:05 +07:00
|
|
|
ret = crypt_alloc_tfms(cc, cipher_api);
|
|
|
|
if (ret < 0) {
|
|
|
|
ti->error = "Error allocating crypto tfm";
|
|
|
|
goto bad;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:07 +07:00
|
|
|
/* Initialize and set key */
|
|
|
|
ret = crypt_set_key(cc, key);
|
2010-08-12 10:14:06 +07:00
|
|
|
if (ret < 0) {
|
2009-12-11 06:51:55 +07:00
|
|
|
ti->error = "Error decoding and setting key";
|
2010-08-12 10:14:06 +07:00
|
|
|
goto bad;
|
2009-12-11 06:51:55 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:07 +07:00
|
|
|
/* Initialize IV */
|
2011-01-14 02:59:53 +07:00
|
|
|
cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
|
2010-08-12 10:14:07 +07:00
|
|
|
if (cc->iv_size)
|
|
|
|
/* at least a 64 bit sector number should fit in our buffer */
|
|
|
|
cc->iv_size = max(cc->iv_size,
|
|
|
|
(unsigned int)(sizeof(u64) / sizeof(u8)));
|
|
|
|
else if (ivmode) {
|
|
|
|
DMWARN("Selected cipher does not support IVs");
|
|
|
|
ivmode = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Choose ivmode, see comments at iv code. */
|
2005-04-17 05:20:36 +07:00
|
|
|
if (ivmode == NULL)
|
|
|
|
cc->iv_gen_ops = NULL;
|
|
|
|
else if (strcmp(ivmode, "plain") == 0)
|
|
|
|
cc->iv_gen_ops = &crypt_iv_plain_ops;
|
2009-12-11 06:52:25 +07:00
|
|
|
else if (strcmp(ivmode, "plain64") == 0)
|
|
|
|
cc->iv_gen_ops = &crypt_iv_plain64_ops;
|
2005-04-17 05:20:36 +07:00
|
|
|
else if (strcmp(ivmode, "essiv") == 0)
|
|
|
|
cc->iv_gen_ops = &crypt_iv_essiv_ops;
|
2006-09-03 05:56:39 +07:00
|
|
|
else if (strcmp(ivmode, "benbi") == 0)
|
|
|
|
cc->iv_gen_ops = &crypt_iv_benbi_ops;
|
2007-05-09 16:32:55 +07:00
|
|
|
else if (strcmp(ivmode, "null") == 0)
|
|
|
|
cc->iv_gen_ops = &crypt_iv_null_ops;
|
2011-01-14 02:59:55 +07:00
|
|
|
else if (strcmp(ivmode, "lmk") == 0) {
|
|
|
|
cc->iv_gen_ops = &crypt_iv_lmk_ops;
|
|
|
|
/* Version 2 and 3 is recognised according
|
|
|
|
* to length of provided multi-key string.
|
|
|
|
* If present (version 3), last key is used as IV seed.
|
|
|
|
*/
|
|
|
|
if (cc->key_size % cc->key_parts)
|
|
|
|
cc->key_parts++;
|
|
|
|
} else {
|
2010-08-12 10:14:07 +07:00
|
|
|
ret = -EINVAL;
|
2006-06-26 14:27:35 +07:00
|
|
|
ti->error = "Invalid IV mode";
|
2010-08-12 10:14:06 +07:00
|
|
|
goto bad;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:06 +07:00
|
|
|
/* Allocate IV */
|
|
|
|
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
|
|
|
|
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
|
|
|
|
if (ret < 0) {
|
|
|
|
ti->error = "Error creating IV";
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-08-12 10:14:06 +07:00
|
|
|
/* Initialize IV (set keys for ESSIV etc) */
|
|
|
|
if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
|
|
|
|
ret = cc->iv_gen_ops->init(cc);
|
|
|
|
if (ret < 0) {
|
|
|
|
ti->error = "Error initialising IV";
|
|
|
|
goto bad;
|
|
|
|
}
|
2009-12-11 06:51:56 +07:00
|
|
|
}
|
|
|
|
|
2010-08-12 10:14:07 +07:00
|
|
|
ret = 0;
|
|
|
|
bad:
|
|
|
|
kfree(cipher_api);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
bad_mem:
|
|
|
|
ti->error = "Cannot allocate cipher strings";
|
|
|
|
return -ENOMEM;
|
|
|
|
}
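
/*
 * Illustration (editorial example, not from the original source): for
 * cipher_in = "aes:2-cbc-essiv:sha256" the parsing above yields
 * cipher = "aes", tfms_count = 2, chainmode = "cbc", ivmode = "essiv"
 * and ivopts = "sha256", so the crypto API string becomes "cbc(aes)".
 */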

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
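/*
 * Illustration (hypothetical device and key, editorial): with the
 * dmsetup table syntax "<logical_start> <num_sectors> crypt <args>":
 *
 *   dmsetup create crypt0 --table \
 *     "0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0"
 *
 * Optional feature arguments follow the five positional ones, e.g.
 * appending "1 allow_discards" passes discards through to the device.
 */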
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_requests = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
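
/*
 * Note (editorial, condensed from the change description for the sscanf
 * validation used above): sscanf("%llu%c", ...) returns 1 only when the
 * argument is a cleanly terminated number. For input such as "34816bla"
 * the stray 'b' is matched into the dummy char and the return value
 * becomes 2, so arguments with trailing garbage are rejected here and
 * in crypt_ctr_cipher() above.
 */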

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
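
/*
 * Note (editorial): reads are first attempted with a non-blocking
 * GFP_NOWAIT allocation in kcryptd_io_read(); only on failure is the
 * bio deferred to the io workqueue. Writes always go through the
 * kcryptd queue, since the data must be encrypted before submission.
 */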

static int crypt_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_requests)
			DMEMIT(" 1 allow_discards");

		break;
	}
	return 0;
}
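
/*
 * Illustration (hypothetical values, editorial): with discards enabled,
 * the STATUSTYPE_TABLE output takes the form
 *   "aes-cbc-essiv:sha256 <hex key> 0 254:3 8192 1 allow_discards"
 * i.e. the same argument order that crypt_ctr() accepts.
 */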

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
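/*
 * Illustration (hypothetical device name, editorial): messages are sent
 * with "dmsetup message <device> 0 <args>" while the device is
 * suspended, e.g.
 *   dmsetup message crypt0 0 key wipe
 *   dmsetup message crypt0 0 key set <64-hex-digit-key>
 */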
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
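
/*
 * Note (editorial): crypt_merge() rewrites the probe to point at the
 * backing device and defers to its merge_bvec_fn, so bios built on top
 * of this target never exceed what the underlying queue can accept.
 */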

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 12, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");