Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:00:54 +07:00)
block: allow specifying size for extra command data
This mirrors the blk-mq capability to allocate extra driver-specific data
behind struct request by setting a cmd_size field, as well as having a
constructor / destructor for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 6d247d7f71
parent 5ea708d15a
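For context, here is a minimal sketch of how a legacy (non-blk-mq) driver could use the new hooks. Only cmd_size, init_rq_fn, exit_rq_fn, and blk_init_allocated_queue() come from this patch; the mydrv_* names, the payload struct, and the sense-buffer allocation are hypothetical illustrations.

#include <linux/blkdev.h>
#include <linux/slab.h>

struct mydrv_cmd {			/* hypothetical per-request payload */
	void	*sense_buf;
};

/* Constructor: runs once when the mempool element is allocated, not per I/O. */
static int mydrv_init_rq(struct request_queue *q, struct request *rq,
			 gfp_t gfp)
{
	struct mydrv_cmd *cmd = (struct mydrv_cmd *)(rq + 1);

	cmd->sense_buf = kmalloc(96, gfp);
	return cmd->sense_buf ? 0 : -ENOMEM;
}

/* Destructor: mirrors the constructor when the element is freed. */
static void mydrv_exit_rq(struct request_queue *q, struct request *rq)
{
	struct mydrv_cmd *cmd = (struct mydrv_cmd *)(rq + 1);

	kfree(cmd->sense_buf);
}

/* Hypothetical strategy function; would dequeue and service requests. */
static void mydrv_request_fn(struct request_queue *q)
{
}

static struct request_queue *mydrv_setup_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;

	q->request_fn = mydrv_request_fn;
	/* Request the extra per-request space before the pools are built. */
	q->cmd_size = sizeof(struct mydrv_cmd);
	q->init_rq_fn = mydrv_init_rq;
	q->exit_rq_fn = mydrv_exit_rq;

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}

blk-mq drivers already get this behavior from the cmd_size field in struct blk_mq_tag_set; this patch brings the legacy request path to parity.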
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -606,17 +606,41 @@ void blk_cleanup_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 /* Allocate memory local to the request queue */
-static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+static void *alloc_request_simple(gfp_t gfp_mask, void *data)
 {
-	int nid = (int)(long)data;
-	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+	struct request_queue *q = data;
+
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
 }
 
-static void free_request_struct(void *element, void *unused)
+static void free_request_simple(void *element, void *data)
 {
 	kmem_cache_free(request_cachep, element);
 }
 
+static void *alloc_request_size(gfp_t gfp_mask, void *data)
+{
+	struct request_queue *q = data;
+	struct request *rq;
+
+	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
+			q->node);
+	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
+		kfree(rq);
+		rq = NULL;
+	}
+	return rq;
+}
+
+static void free_request_size(void *element, void *data)
+{
+	struct request_queue *q = data;
+
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, element);
+	kfree(element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -629,10 +653,15 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
-					  free_request_struct,
-					  (void *)(long)q->node, gfp_mask,
-					  q->node);
+	if (q->cmd_size) {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_size, free_request_size,
+				q, gfp_mask, q->node);
+	} else {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_simple, free_request_simple,
+				q, gfp_mask, q->node);
+	}
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
@@ -846,12 +875,15 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 	if (!q->fq)
 		return -ENOMEM;
 
+	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
+		goto out_free_flush_queue;
+
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
-		goto fail;
+		goto out_exit_flush_rq;
 
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
@@ -869,13 +901,16 @@ int blk_init_allocated_queue(struct request_queue *q)
 	/* init elevator */
 	if (elevator_init(q, NULL)) {
 		mutex_unlock(&q->sysfs_lock);
-		goto fail;
+		goto out_exit_flush_rq;
 	}
 
 	mutex_unlock(&q->sysfs_lock);
 	return 0;
 
-fail:
+out_exit_flush_rq:
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, q->fq->flush_rq);
+out_free_flush_queue:
 	blk_free_flush_queue(q->fq);
 	wbt_exit(q);
 	return -ENOMEM;
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -547,11 +547,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	if (!fq)
 		goto fail;
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		spin_lock_init(&fq->mq_flush_lock);
-		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	}
 
+	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -814,10 +814,13 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	if (!q->mq_ops)
+	if (!q->mq_ops) {
+		if (q->exit_rq_fn)
+			q->exit_rq_fn(q, q->fq->flush_rq);
 		blk_free_flush_queue(q->fq);
-	else
+	} else {
 		blk_mq_release(q);
+	}
 
 	blk_trace_shutdown(q);
 
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -273,6 +273,8 @@ typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
 typedef int (bsg_job_fn) (struct bsg_job *);
+typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
+typedef void (exit_rq_fn)(struct request_queue *, struct request *);
 
 enum blk_eh_timer_return {
 	BLK_EH_NOT_HANDLED,
@@ -408,6 +410,8 @@ struct request_queue {
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 	lld_busy_fn		*lld_busy_fn;
+	init_rq_fn		*init_rq_fn;
+	exit_rq_fn		*exit_rq_fn;
 
 	const struct blk_mq_ops	*mq_ops;
 
@@ -577,6 +581,9 @@ struct request_queue {
 #endif
 
 	bool			mq_sysfs_init_done;
+
+	size_t			cmd_size;
+	void			*rq_alloc_data;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
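Since alloc_request_size() places the extra bytes immediately after struct request (and blk_alloc_flush_queue() rounds the flush request up the same way), a driver can reach its payload with a trivial accessor, analogous to blk-mq's blk_mq_rq_to_pdu(). The helper name below is hypothetical:

static inline void *mydrv_rq_to_cmd(struct request *rq)
{
	/* the payload starts right behind the request itself */
	return rq + 1;
}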