Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-26 15:45:22 +07:00)
Commit 488f6682c8
Blk-crypto delegates crypto operations to inline encryption hardware when it is available. The separately configurable blk-crypto-fallback provides a software fallback that uses the kernel crypto API: when enabled, blk-crypto uses it for en/decryption whenever inline encryption hardware is not available.

This means upper layers do not have to check whether the underlying device supports inline encryption before specifying an encryption context for a bio. It also allows testing without actual inline encryption hardware; in particular, the inline encryption code in ext4 and f2fs can be tested simply by running xfstests with the inlinecrypt mount option, which in turn lets the regular upstream regression testing of ext4 cover the inline encryption code paths.

For more details, refer to Documentation/block/inline-encryption.rst.

Signed-off-by: Satya Tangirala <satyat@google.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
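For context, a minimal sketch of how an upper layer (e.g. a filesystem) might hand blk-crypto an encryption context without caring whether the device has inline encryption hardware or the fallback ends up doing the work. It assumes the public helpers declared in include/linux/blk-crypto.h (bio_crypt_set_ctx(), with the key assumed to have been prepared via blk_crypto_init_key()); this is illustrative only, not code from this patch:

#include <linux/bio.h>
#include <linux/blk-crypto.h>

/*
 * Illustrative sketch: attach an inline-encryption context to a bio.
 * blk-crypto later decides whether the I/O is handled by inline
 * encryption hardware or by blk-crypto-fallback; the caller does not
 * need to know which.
 */
static void example_submit_encrypted_bio(struct bio *bio,
                                         const struct blk_crypto_key *key,
                                         u64 first_dun)
{
        /* Data unit number (IV material) for the first data unit of the bio */
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

        /* key is assumed to have been set up with blk_crypto_init_key() */
        bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);

        submit_bio(bio);
}

With CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK enabled, the same call path works on devices without inline encryption support, since blk-crypto routes such bios through the software fallback.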
202 lines
5.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
        const char *cipher_str;         /* crypto API name (for fallback case) */
        unsigned int keysize;           /* key size in bytes */
        unsigned int ivsize;            /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                             unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
                             struct bio_crypt_ctx *bc2);

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
                                       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
        rq->crypt_ctx = NULL;
        rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
                                               struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_free_ctx(bio);
}

static inline void bio_crypt_do_front_merge(struct request *rq,
                                            struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (bio_has_crypt_ctx(bio))
                memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
                       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
        if (bio_has_crypt_ctx(*bio_ptr))
                return __blk_crypto_bio_prep(bio_ptr);
        return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                return __blk_crypto_init_request(rq);
        return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                __blk_crypto_free_request(rq);
}

void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                              gfp_t gfp_mask);
static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                                          gfp_t gfp_mask)
{
        if (bio_has_crypt_ctx(bio))
                __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *                                    into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{

        if (blk_crypto_rq_is_encrypted(rq))
                return blk_crypto_init_request(rq);
        return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        pr_warn_once("crypto API fallback is disabled\n");
        return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        pr_warn_once("crypto API fallback disabled; failing request.\n");
        (*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
        return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */