block, bfq: split function bfq_better_to_idle

This is a preparatory commit for later commits that need to check only
one of the two main reasons for idling. This change should also improve
the quality of the code a little, by splitting a function that contains
very long, non-trivial and loosely related comments.

Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 05c2f5c30b (parent 73d5811849)
Paolo Valente, 2019-01-29 12:06:30 +01:00; committed by Jens Axboe

block/bfq-iosched.c
@@ -3404,53 +3404,13 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
 		bfq_bfqq_budget_timeout(bfqq);
 }
 
-/*
- * For a queue that becomes empty, device idling is allowed only if
- * this function returns true for the queue. As a consequence, since
- * device idling plays a critical role in both throughput boosting and
- * service guarantees, the return value of this function plays a
- * critical role in both these aspects as well.
- *
- * In a nutshell, this function returns true only if idling is
- * beneficial for throughput or, even if detrimental for throughput,
- * idling is however necessary to preserve service guarantees (low
- * latency, desired throughput distribution, ...). In particular, on
- * NCQ-capable devices, this function tries to return false, so as to
- * help keep the drives' internal queues full, whenever this helps the
- * device boost the throughput without causing any service-guarantee
- * issue.
- *
- * In more detail, the return value of this function is obtained by,
- * first, computing a number of boolean variables that take into
- * account throughput and service-guarantee issues, and, then,
- * combining these variables in a logical expression. Most of the
- * issues taken into account are not trivial. We discuss these issues
- * individually while introducing the variables.
- */
-static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
+					     struct bfq_queue *bfqq)
 {
-	struct bfq_data *bfqd = bfqq->bfqd;
 	bool rot_without_queueing =
 		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
 		bfqq_sequential_and_IO_bound,
-		idling_boosts_thr, idling_boosts_thr_without_issues,
-		idling_needed_for_service_guarantees,
-		asymmetric_scenario;
-
-	if (bfqd->strict_guarantees)
-		return true;
-
-	/*
-	 * Idling is performed only if slice_idle > 0. In addition, we
-	 * do not idle if
-	 * (a) bfqq is async
-	 * (b) bfqq is in the idle io prio class: in this case we do
-	 *     not idle because we want to minimize the bandwidth that
-	 *     queues in this class can steal to higher-priority queues
-	 */
-	if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
-	    bfq_class_idle(bfqq))
-		return false;
+		idling_boosts_thr;
 
 	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
 		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
@@ -3482,8 +3442,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 		bfqq_sequential_and_IO_bound);
 
 	/*
-	 * The value of the next variable,
-	 * idling_boosts_thr_without_issues, is equal to that of
+	 * The return value of this function is equal to that of
 	 * idling_boosts_thr, unless a special case holds. In this
 	 * special case, described below, idling may cause problems to
 	 * weight-raised queues.
@@ -3500,32 +3459,35 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	 * which enqueue several requests in advance, and further
 	 * reorder internally-queued requests.
 	 *
-	 * For this reason, we force to false the value of
-	 * idling_boosts_thr_without_issues if there are weight-raised
-	 * busy queues. In this case, and if bfqq is not weight-raised,
-	 * this guarantees that the device is not idled for bfqq (if,
-	 * instead, bfqq is weight-raised, then idling will be
-	 * guaranteed by another variable, see below). Combined with
-	 * the timestamping rules of BFQ (see [1] for details), this
-	 * behavior causes bfqq, and hence any sync non-weight-raised
-	 * queue, to get a lower number of requests served, and thus
-	 * to ask for a lower number of requests from the request
-	 * pool, before the busy weight-raised queues get served
-	 * again. This often mitigates starvation problems in the
-	 * presence of heavy write workloads and NCQ, thereby
-	 * guaranteeing a higher application and system responsiveness
-	 * in these hostile scenarios.
+	 * For this reason, we force to false the return value if
+	 * there are weight-raised busy queues. In this case, and if
+	 * bfqq is not weight-raised, this guarantees that the device
+	 * is not idled for bfqq (if, instead, bfqq is weight-raised,
+	 * then idling will be guaranteed by another variable, see
+	 * below). Combined with the timestamping rules of BFQ (see
+	 * [1] for details), this behavior causes bfqq, and hence any
+	 * sync non-weight-raised queue, to get a lower number of
+	 * requests served, and thus to ask for a lower number of
+	 * requests from the request pool, before the busy
+	 * weight-raised queues get served again. This often mitigates
+	 * starvation problems in the presence of heavy write
+	 * workloads and NCQ, thereby guaranteeing a higher
+	 * application and system responsiveness in these hostile
+	 * scenarios.
 	 */
-	idling_boosts_thr_without_issues = idling_boosts_thr &&
+	return idling_boosts_thr &&
 		bfqd->wr_busy_queues == 0;
+}
 
+static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
+						 struct bfq_queue *bfqq)
+{
 	/*
-	 * There is then a case where idling must be performed not
-	 * for throughput concerns, but to preserve service
-	 * guarantees.
+	 * There is a case where idling must be performed not for
+	 * throughput concerns, but to preserve service guarantees.
 	 *
 	 * To introduce this case, we can note that allowing the drive
 	 * to enqueue more than one request at a time, and hence
 	 * delegating de facto final scheduling decisions to the
 	 * drive's internal scheduler, entails loss of control on the
 	 * actual request service order. In particular, the critical
@@ -3682,9 +3644,9 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	 * to let requests be served in the desired order until all
 	 * the requests already queued in the device have been served.
 	 */
-	asymmetric_scenario = (bfqq->wr_coeff > 1 &&
-			       bfqd->wr_busy_queues <
-			       bfq_tot_busy_queues(bfqd)) ||
+	bool asymmetric_scenario = (bfqq->wr_coeff > 1 &&
+				    bfqd->wr_busy_queues <
+				    bfq_tot_busy_queues(bfqd)) ||
 		!bfq_symmetric_scenario(bfqd);
 
 	/*
@@ -3701,17 +3663,64 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	 * now establish when idling is actually needed to preserve
 	 * service guarantees.
 	 */
-	idling_needed_for_service_guarantees =
-		asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
+	return asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
+}
+
+/*
+ * For a queue that becomes empty, device idling is allowed only if
+ * this function returns true for that queue. As a consequence, since
+ * device idling plays a critical role for both throughput boosting
+ * and service guarantees, the return value of this function plays a
+ * critical role as well.
+ *
+ * In a nutshell, this function returns true only if idling is
+ * beneficial for throughput or, even if detrimental for throughput,
+ * idling is however necessary to preserve service guarantees (low
+ * latency, desired throughput distribution, ...). In particular, on
+ * NCQ-capable devices, this function tries to return false, so as to
+ * help keep the drives' internal queues full, whenever this helps the
+ * device boost the throughput without causing any service-guarantee
+ * issue.
+ *
+ * Most of the issues taken into account to get the return value of
+ * this function are not trivial. We discuss these issues in the two
+ * functions providing the main pieces of information needed by this
+ * function.
+ */
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+{
+	struct bfq_data *bfqd = bfqq->bfqd;
+	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
+
+	if (unlikely(bfqd->strict_guarantees))
+		return true;
+
+	/*
+	 * Idling is performed only if slice_idle > 0. In addition, we
+	 * do not idle if
+	 * (a) bfqq is async
+	 * (b) bfqq is in the idle io prio class: in this case we do
+	 *     not idle because we want to minimize the bandwidth that
+	 *     queues in this class can steal to higher-priority queues
+	 */
+	if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
+	    bfq_class_idle(bfqq))
+		return false;
+
+	idling_boosts_thr_with_no_issue =
+		idling_boosts_thr_without_issues(bfqd, bfqq);
+
+	idling_needed_for_service_guar =
+		idling_needed_for_service_guarantees(bfqd, bfqq);
 
 	/*
-	 * We have now all the components we need to compute the
+	 * We have now the two components we need to compute the
 	 * return value of the function, which is true only if idling
 	 * either boosts the throughput (without issues), or is
 	 * necessary to preserve service guarantees.
 	 */
-	return idling_boosts_thr_without_issues ||
-		idling_needed_for_service_guarantees;
+	return idling_boosts_thr_with_no_issue ||
+		idling_needed_for_service_guar;
 }
 
 /*
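
As a quick reference, the decision structure that results from this
patch can be condensed into the following compilable sketch. This is an
illustrative model only: the struct definitions, the stubbed predicate
bodies and the simplified sync/idle-class flags are stand-ins for the
real kernel code in block/bfq-iosched.c, not excerpts from it.

#include <stdbool.h>
#include <stdio.h>

struct bfq_data {
	bool strict_guarantees;
	unsigned int bfq_slice_idle;	/* 0 disables idling */
	unsigned int wr_busy_queues;	/* busy weight-raised queues */
};

struct bfq_queue {
	struct bfq_data *bfqd;
	bool sync;		/* stand-in for bfq_bfqq_sync() */
	bool class_idle;	/* stand-in for bfq_class_idle() */
};

/* First split-out predicate: throughput-oriented reasons for idling. */
static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq)
{
	bool idling_boosts_thr = true;	/* stub for the real heuristics */

	(void)bfqq;	/* the real code inspects per-queue state here */
	return idling_boosts_thr && bfqd->wr_busy_queues == 0;
}

/* Second split-out predicate: service-guarantee reasons for idling. */
static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
						 struct bfq_queue *bfqq)
{
	bool asymmetric_scenario = false;	/* stub for the real heuristics */

	(void)bfqd;
	(void)bfqq;
	return asymmetric_scenario;
}

/*
 * The caller keeps only the quick checks, then ORs the two independent
 * reasons together; later code can invoke either predicate alone.
 */
static bool bfq_better_to_idle(struct bfq_queue *bfqq)
{
	struct bfq_data *bfqd = bfqq->bfqd;

	if (bfqd->strict_guarantees)
		return true;

	if (bfqd->bfq_slice_idle == 0 || !bfqq->sync || bfqq->class_idle)
		return false;

	return idling_boosts_thr_without_issues(bfqd, bfqq) ||
		idling_needed_for_service_guarantees(bfqd, bfqq);
}

int main(void)
{
	struct bfq_data d = { .bfq_slice_idle = 8, .wr_busy_queues = 0 };
	struct bfq_queue q = { .bfqd = &d, .sync = true };

	printf("better to idle: %d\n", bfq_better_to_idle(&q));
	return 0;
}

The point of the split shows up in the final return statement: each of
the two main reasons for idling now sits behind its own predicate, so
the commits this change prepares for can evaluate one reason without
computing the other.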