mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
e806402130
A lot of the REQ_* flags are only used on struct requests, and only of use to the block layer and a few drivers that dig into struct request internals. This patch adds a new req_flags_t rq_flags field to struct request for them, and thus dramatically shrinks the number of common requests. It also removes the unfortunate situation where we have to fit the fields from the same enum into 32 bits for struct bio and 64 bits for struct request. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com> Signed-off-by: Jens Axboe <axboe@fb.com>
134 lines
3.2 KiB
C
/*
|
|
* Functions related to setting various queue properties from drivers
|
|
*/
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/bio.h>
|
|
#include <linux/blkdev.h>
|
|
#include <linux/blk-mq.h>
|
|
#include <linux/sched/sysctl.h>
|
|
|
|
#include "blk.h"
|
|
|
|
/*
|
|
* for max sense size
|
|
*/
|
|
#include <scsi/scsi_cmnd.h>
|
|
|
|
/**
|
|
* blk_end_sync_rq - executes a completion event on a request
|
|
* @rq: request to complete
|
|
* @error: end I/O status of the request
|
|
*/
|
|
static void blk_end_sync_rq(struct request *rq, int error)
|
|
{
|
|
struct completion *waiting = rq->end_io_data;
|
|
|
|
rq->end_io_data = NULL;
|
|
|
|
/*
|
|
* complete last, if this is a stack request the process (and thus
|
|
* the rq pointer) could be invalid right after this complete()
|
|
*/
|
|
complete(waiting);
|
|
}
|
|
|
|
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* Must be process context: we take queue_lock with spin_lock_irq(),
	 * which unconditionally re-enables interrupts on unlock. */
	WARN_ON(irqs_disabled());
	/* Only passthrough (non-filesystem) requests may be issued here. */
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

	/* Legacy (non-mq) path: elevator insertion requires queue_lock. */
	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* Queue is going away: fail the request immediately with
		 * -ENXIO.  RQF_QUIET suppresses error logging; ending the
		 * request invokes @done, satisfying the "Note" above. */
		rq->rq_flags |= RQF_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	/* Queue the request at the chosen position and kick the queue. */
	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
|
|
|
|
/**
|
|
* blk_execute_rq - insert a request into queue for execution
|
|
* @q: queue to insert the request in
|
|
* @bd_disk: matching gendisk
|
|
* @rq: request to insert
|
|
* @at_head: insert request at head or tail of queue
|
|
*
|
|
* Description:
|
|
* Insert a fully prepared request at the back of the I/O scheduler queue
|
|
* for execution and wait for completion.
|
|
*/
|
|
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
|
|
struct request *rq, int at_head)
|
|
{
|
|
DECLARE_COMPLETION_ONSTACK(wait);
|
|
char sense[SCSI_SENSE_BUFFERSIZE];
|
|
int err = 0;
|
|
unsigned long hang_check;
|
|
|
|
if (!rq->sense) {
|
|
memset(sense, 0, sizeof(sense));
|
|
rq->sense = sense;
|
|
rq->sense_len = 0;
|
|
}
|
|
|
|
rq->end_io_data = &wait;
|
|
blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
|
|
|
|
/* Prevent hang_check timer from firing at us during very long I/O */
|
|
hang_check = sysctl_hung_task_timeout_secs;
|
|
if (hang_check)
|
|
while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
|
|
else
|
|
wait_for_completion_io(&wait);
|
|
|
|
if (rq->errors)
|
|
err = -EIO;
|
|
|
|
if (rq->sense == sense) {
|
|
rq->sense = NULL;
|
|
rq->sense_len = 0;
|
|
}
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL(blk_execute_rq);
|