block: extend queue bypassing to cover blkcg policies
Extend queue bypassing such that a dying queue is always bypassing and
blk-throttle is drained on bypass. With blkcg policies updated to test
blk_queue_bypass() instead of blk_queue_dead(), this ensures that no bio
or request is held by or going through blkcg policies on a bypassing
queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent d732580b4e
commit 6ecf23afab
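Before the diff, a minimal userspace model of the mechanism this patch completes: blk_queue_bypass_start/end() keep a nesting count, blk_cleanup_queue() takes a bypass reference that is never dropped, and blkcg policy code gates on the bypass state instead of the dead state. This is a sketch only; the struct layout, the plain bools standing in for queue flags, the elided locking and draining, and the policy_may_throttle() hook are illustrative assumptions, not kernel code.

/*
 * Standalone model (not kernel code) of the bypass logic in this
 * commit: bypass_start/end maintain a depth counter, a dead queue
 * takes a permanent bypass reference, and policy code tests the
 * bypass flag rather than the dead flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct request_queue {
        int bypass_depth;       /* nesting count of bypass requests */
        bool bypass;            /* stand-in for QUEUE_FLAG_BYPASS */
        bool dead;              /* stand-in for QUEUE_FLAG_DEAD */
};

static bool blk_queue_bypass(struct request_queue *q)
{
        return q->bypass;
}

static void blk_queue_bypass_start(struct request_queue *q)
{
        q->bypass_depth++;
        q->bypass = true;
        /* the real function also drains throttled/issued requests here */
}

static void blk_queue_bypass_end(struct request_queue *q)
{
        if (--q->bypass_depth == 0)
                q->bypass = false;
}

static void blk_cleanup_queue(struct request_queue *q)
{
        /* dead queue is permanently in bypass mode till released */
        q->bypass_depth++;
        q->bypass = true;
        q->dead = true;
}

/* hypothetical blkcg policy hook: after this patch, test bypass, not dead */
static bool policy_may_throttle(struct request_queue *q)
{
        return !blk_queue_bypass(q);
}

int main(void)
{
        struct request_queue q = { 0 };

        printf("alive: may throttle = %d\n", policy_may_throttle(&q));

        blk_queue_bypass_start(&q);     /* e.g. an elevator switch */
        printf("bypassing: may throttle = %d\n", policy_may_throttle(&q));
        blk_queue_bypass_end(&q);

        blk_cleanup_queue(&q);          /* bypass never ends now */
        printf("dead: may throttle = %d\n", policy_may_throttle(&q));
        return 0;
}

Built with a stock C compiler this prints 1, 0, 0: throttling is refused both during a transient bypass and, permanently, once the queue is dead.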
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		if (drain_all)
-			blk_throtl_drain(q);
+		blk_throtl_drain(q);
 
 		/*
 		 * This function might be called on a queue which failed
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used. This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before. On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/* dead queue is permanently in bypass mode till released */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
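One way to read the first hunk: once bios can be parked inside blk-throttle, draining only the elevator/dispatch side leaves those bios still referencing the queue, so blk_throtl_drain() has to run on every drain, not just with drain_all. A toy model of that reasoning (toy_queue and its counters are invented for illustration; the real loop runs under q->queue_lock and checks actual request counts):

/*
 * Toy model: bios sit either on a dispatch list or parked in
 * blk-throttle. Draining without flushing the throttle side would
 * leave the parked bios stranded and the loop spinning forever.
 */
#include <stdio.h>

struct toy_queue {
        int dispatch_pending;   /* bios queued for the driver */
        int throttled;          /* bios parked in blk-throttle */
};

static void blk_throtl_drain(struct toy_queue *q)
{
        /* hand every parked bio back to the dispatch path */
        q->dispatch_pending += q->throttled;
        q->throttled = 0;
}

static void run_queue(struct toy_queue *q)
{
        q->dispatch_pending = 0;        /* driver consumes everything */
}

static void drain_queue(struct toy_queue *q)
{
        do {
                blk_throtl_drain(q);    /* unconditional after this patch */
                run_queue(q);
        } while (q->dispatch_pending || q->throttled);
}

int main(void)
{
        struct toy_queue q = { .dispatch_pending = 3, .throttled = 2 };

        drain_queue(&q);
        printf("pending=%d throttled=%d\n", q.dispatch_pending, q.throttled);
        return 0;
}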
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_bypass(q))) {
 		kfree(tg);
 		return NULL;
 	}
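The two hunks above sit inside the kernel's drop-the-lock-to-allocate pattern: throtl_get_tg() tests the queue under q->queue_lock, releases the lock for a blocking allocation, then must retest after relocking because the queue may have entered bypass in between. A rough userspace sketch of that pattern, assuming a pthread mutex in place of q->queue_lock and a dummy throtl_grp:

/*
 * Userspace sketch of the recheck-after-relock pattern in
 * throtl_get_tg(). Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request_queue {
        pthread_mutex_t queue_lock;
        bool bypass;
};

struct throtl_grp { int dummy; };

static struct throtl_grp *throtl_get_tg(struct request_queue *q)
{
        struct throtl_grp *tg;

        pthread_mutex_lock(&q->queue_lock);

        /* no throttling for a bypassing queue */
        if (q->bypass) {
                pthread_mutex_unlock(&q->queue_lock);
                return NULL;
        }

        /* drop the lock to allocate (kernel: a blocking GFP allocation) */
        pthread_mutex_unlock(&q->queue_lock);
        tg = malloc(sizeof(*tg));

        pthread_mutex_lock(&q->queue_lock);

        /* make sure @q did not start bypassing while we were unlocked */
        if (q->bypass) {
                free(tg);
                pthread_mutex_unlock(&q->queue_lock);
                return NULL;
        }

        pthread_mutex_unlock(&q->queue_lock);
        return tg;
}

int main(void)
{
        struct request_queue q = {
                .queue_lock = PTHREAD_MUTEX_INITIALIZER,
                .bypass = false,
        };
        struct throtl_grp *tg = throtl_get_tg(&q);

        printf("alive queue: tg %s\n", tg ? "allocated" : "refused");
        free(tg);

        q.bypass = true;        /* single-threaded demo, no lock needed */
        printf("bypassing queue: tg %s\n",
               throtl_get_tg(&q) ? "allocated" : "refused");
        return 0;
}

The second test is the one this patch switches from blk_queue_dead() to blk_queue_bypass(); without the recheck, a group could be installed on a queue that is already being drained.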