Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 01:26:44 +07:00
91e1062592
Fix possible max_phys_segments violation in cloned dm-crypt bio.

For a write operation dm-crypt needs to allocate a new bio and run the crypto operation on this clone. The cloned request always has the same size, but the number of physical segments can increase and violate the max_phys_segments restriction.

This can lead to data corruption and serious hardware malfunction. It was recently observed when using XFS over dm-crypt with at least two HBA controller drivers (arcmsr, cciss).

Fix it by using the bio_add_page() call (which tests for the other restrictions too) instead of constructing our own biovec.

All versions of dm-crypt are affected by this bug.

Cc: stable@kernel.org
Cc: dm-crypt@saout.de
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
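The change is easiest to see as a before/after sketch of the write-path page attachment (a schematic only, assuming a simplified clone/page setup; the pre-fix code shown here is a reconstruction, the post-fix code is in crypt_alloc_buffer() below):

	/* Before (schematic): the biovec was filled by hand, so the clone could
	 * end up with more physical segments than the queue allows. */
	bv = bio_iovec_idx(clone, i);
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = 0;
	clone->bi_vcnt++;
	clone->bi_size += len;

	/* After (schematic): bio_add_page() consults the request queue's limits
	 * (max_phys_segments among them) and refuses the page if they would be
	 * exceeded, so a smaller but valid bio is submitted instead. */
	if (!bio_add_page(clone, page, len, 0)) {
		mempool_free(page, cc->page_pool);
		break;
	}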
1109 lines
26 KiB
C
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};
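/*
 * offset_in/idx_in and offset_out/idx_out walk the source and destination
 * bio_vec arrays independently, while 'sector' carries the IV sector number
 * (base sector plus iv_offset) and is advanced once per 512-byte block
 * processed by crypt_convert().
 */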
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
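/*
 * MIN_IOS sizes the dm_crypt_io mempool and the bioset; MIN_POOL_PAGES sizes
 * the page mempool used for write buffers. The first MIN_BIO_PAGES pages of a
 * clone are allocated with blocking allowed; beyond that crypt_alloc_buffer()
 * switches to non-blocking allocation and may return a partial bio.
 */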
static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
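/*
 * Example: with a 16-byte IV, "plain" turns sector 5 into
 * 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (low 32 bits of the
 * sector number, little-endian, rest zero-filled).
 */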
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
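/*
 * ESSIV setup: hash the volume key with the digest named in the IV options
 * to obtain a salt, allocate a single-block cipher of the same algorithm as
 * the bulk cipher, and key it with that salt. crypt_iv_essiv_gen() then
 * encrypts the little-endian sector number with this cipher to produce the IV.
 */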
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
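/*
 * benbi computes the IV as a 64-bit big-endian count of "narrow blocks",
 * starting at 1: the sector number is shifted left by (9 - log2(cipher
 * block size)), so a 16-byte block cipher gets 32 narrow blocks per
 * 512-byte sector. The count is stored in the last 8 bytes of the IV.
 */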
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
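/*
 * crypt_convert_scatterlist() runs one synchronous blkcipher operation on a
 * single scatterlist segment. If an IV generator is configured, the IV for
 * the given sector is produced first and passed via the blkcipher descriptor.
 */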
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in, sg_out;

		sg_init_table(&sg_in, 1);
		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

		sg_init_table(&sg_out, 1);
		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
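/*
 * Pages are attached with bio_add_page(), which checks the queue's merge and
 * segment restrictions (this is the fix for the max_phys_segments violation
 * described in the commit message above); if a page cannot be added, the
 * partially filled bio is returned and process_write() submits the remainder
 * in further clones.
 */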
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (!read_io) {
		crypt_free_buffer_pages(cc, clone);
		goto out;
	}

	if (unlikely(error))
		goto out;

	bio_put(clone);
	kcryptd_queue_crypt(io);
	return;

out:
	bio_put(clone);
	crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		crypt_dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
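/*
 * Write path: encrypt the data of base_bio into freshly allocated pages and
 * submit the result as one or more clone bios. Each clone holds a reference
 * on io->pending, so the original bio is only completed once every clone has
 * finished.
 */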
static void process_write(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			crypt_dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			crypt_dec_pending(io, -EIO);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}

static void process_read_endio(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	crypt_dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read_endio(io);
	else
		process_write(io);
}

/*
 * Decode key from its hex representation
 */
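/* Example: the 8-character string "deadbeef" decodes into the 4 key bytes
 * 0xde 0xad 0xbe 0xef; the string length must be exactly 2 * key_size. */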
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
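/*
 * Illustrative example (not from this file): a table line such as
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0
 * maps 409600 sectors; argv[0] is "aes-cbc-essiv:sha256", which is split
 * below into cipher "aes", chainmode "cbc", ivmode "essiv" and
 * ivopts "sha256".
 */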
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatiblity mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
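/*
 * Illustrative usage (assuming the standard dmsetup tool): while the device
 * is suspended, "dmsetup message <name> 0 key wipe" clears the key and
 * "dmsetup message <name> 0 key set <hexkey>" loads a new one.
 */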
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name = "crypt",
	.version= {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = crypt_ctr,
	.dtr = crypt_dtr,
	.map = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");