block: get rid of on-stack plugging debug checks

We don't need them anymore, so kill:

- REQ_ON_PLUG checks in various places
- !rq_mergeable() check in plug merging (see the plug usage sketch below)
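
For context, here is a minimal sketch of how a caller drives the on-stack plug these checks were policing, assuming the 2.6.39-era block API (blk_start_plug(), blk_finish_plug(), and submit_bio(int rw, struct bio *)); the submit_batch() helper and its parameters are hypothetical, for illustration only. While a plug is active, new requests collect on plug->list instead of being dispatched, so list membership already encodes the state that REQ_ON_PLUG duplicated:

/*
 * Hypothetical example, not part of this commit: submit nr bios under
 * one on-stack plug so the block layer can merge adjacent requests
 * before they reach the driver.
 */
#include <linux/blkdev.h>
#include <linux/fs.h>

static void submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);                  /* current task now has an active plug */
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);      /* requests queue up on plug.list */
        blk_finish_plug(&plug);                 /* flush plug.list toward the driver */
}

Because queuelist membership on plug->list fully captures the plugged state, the flag and its BUG_ON()s no longer catch anything useful, hence their removal here.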

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Jens Axboe 2011-05-20 20:52:16 +02:00
parent 0eb8e88572
commit 771949d03b
3 changed files with 0 additions and 33 deletions

block/blk-core.c

@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-        BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
         if (rq->cmd_flags & REQ_ELVPRIV)
                 elv_put_request(q, rq);
         mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 {
         const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 
-        /*
-         * Debug stuff, kill later
-         */
-        if (!rq_mergeable(req)) {
-                blk_dump_rq_flags(req, "back");
-                return false;
-        }
-
         if (!ll_back_merge_fn(q, req, bio))
                 return false;
 
@@ -1141,14 +1131,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
         const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
         sector_t sector;
 
-        /*
-         * Debug stuff, kill later
-         */
-        if (!rq_mergeable(req)) {
-                blk_dump_rq_flags(req, "front");
-                return false;
-        }
-
         if (!ll_front_merge_fn(q, req, bio))
                 return false;
 
@@ -1258,14 +1240,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
         el_ret = elv_merge(q, &req, bio);
         if (el_ret == ELEVATOR_BACK_MERGE) {
-                BUG_ON(req->cmd_flags & REQ_ON_PLUG);
                 if (bio_attempt_back_merge(q, req, bio)) {
                         if (!attempt_back_merge(q, req))
                                 elv_merged_request(q, req, el_ret);
                         goto out_unlock;
                 }
         } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-                BUG_ON(req->cmd_flags & REQ_ON_PLUG);
                 if (bio_attempt_front_merge(q, req, bio)) {
                         if (!attempt_front_merge(q, req))
                                 elv_merged_request(q, req, el_ret);
@@ -1320,10 +1300,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                         if (__rq->q != q)
                                 plug->should_sort = 1;
                 }
-                /*
-                 * Debug flag, kill later
-                 */
-                req->cmd_flags |= REQ_ON_PLUG;
                 list_add_tail(&req->queuelist, &plug->list);
                 drive_stat_acct(req, 1);
         } else {
@@ -2749,7 +2725,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         while (!list_empty(&list)) {
                 rq = list_entry_rq(list.next);
                 list_del_init(&rq->queuelist);
-                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
                         /*
@@ -2761,8 +2736,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                         depth = 0;
                         spin_lock(q->queue_lock);
                 }
-                rq->cmd_flags &= ~REQ_ON_PLUG;
-
                 /*
                  * rq is already accounted, so use raw insert
                  */

block/elevator.c

@@ -416,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
         struct list_head *entry;
         int stop_flags;
 
-        BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
         if (q->last_merge == rq)
                 q->last_merge = NULL;
 
@@ -656,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
         rq->q = q;
 
-        BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
         if (rq->cmd_flags & REQ_SOFTBARRIER) {
                 /* barriers are scheduling boundary, update end_sector */
                 if (rq->cmd_type == REQ_TYPE_FS ||

include/linux/blk_types.h

@@ -151,7 +151,6 @@ enum rq_flag_bits {
         __REQ_IO_STAT,          /* account I/O stat */
         __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
         __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */
-        __REQ_ON_PLUG,          /* on plug list */
         __REQ_NR_BITS,          /* stops here */
 };
 
@@ -192,6 +191,5 @@ enum rq_flag_bits {
 #define REQ_IO_STAT             (1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE         (1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE              (1 << __REQ_SECURE)
-#define REQ_ON_PLUG             (1 << __REQ_ON_PLUG)
 
 #endif /* __LINUX_BLK_TYPES_H */