commit 13369816cb ("block: fix blk-iolatency accounting underflow")
The blk-iolatency controller measures the time from rq_qos_throttle() to
rq_qos_done_bio() and attributes this time to the first bio that needs
to create the request. This means if a bio is plug-mergeable or
bio-mergeable, it gets to bypass the blk-iolatency controller.
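For orientation, the two ends of that measured window sit at bio submission and bio completion. Roughly (a sketch of the call sites, not verbatim kernel code):

	/* On submission (blk_mq_make_request()): starts iolatency's clock,
	 * and may sleep if the cgroup is over its latency target. */
	rq_qos_throttle(q, bio);

	/* On completion (bio_endio()): stops the clock and drops the
	 * inflight count. */
	rq_qos_done_bio(bio->bi_disk->queue, bio);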
The recent series [1], which tags all bios with blkgs, undermined how
iolatency determined which bios it was charging and should process in
rq_qos_done_bio(). Because all bios are now tagged, this caused the
atomic_t inflight count in struct rq_wait to underflow, resulting in a
stall.
This patch adds a new flag BIO_TRACKED to let controllers know that a
bio is going through the rq_qos path. blk-iolatency now checks if this
flag is set to see if it should process the bio in rq_qos_done_bio().
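On the blk-iolatency side, the fix is then an early return for bios that never went through the throttle path. A minimal sketch of that check (hedged; not the verbatim hunk from this patch):

	static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
	{
		/*
		 * Only bios that passed through rq_qos_throttle() were counted
		 * as inflight; merged bios never were, so skipping them here
		 * keeps the rq_wait inflight count from underflowing.
		 */
		if (!bio->bi_blkg || !bio_flagged(bio, BIO_TRACKED))
			return;

		/* ... account latency and decrement the inflight counter ... */
	}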
Overloading BIO_QUEUE_ENTERED works, but makes the flag rules confusing.
BIO_THROTTLED was another candidate, but the flag is set for all bios
that have gone through blk-throttle code. Overloading a flag comes with
the burden of making sure that when either implementation changes, a
change in setting rules for one doesn't cause a bug in the other. So
here, we unfortunately opt for adding a new flag.
[1] https://lore.kernel.org/lkml/20181205171039.73066-1-dennis@kernel.org/
Fixes: 5cdf2e3fea ("blkcg: associate blkg when associating a device")
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
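Every hook in rq_qos_ops is optional: the __rq_qos_*() walkers declared further down only invoke an op when it is non-NULL. As an illustration (all demo_* names here are hypothetical, not kernel code), a minimal policy that only counts inflight requests could look like:

	struct demo_qos {
		struct rq_qos rqos;	/* embedded; container_of() recovers us */
		atomic_t inflight;
	};

	static void demo_issue(struct rq_qos *rqos, struct request *rq)
	{
		struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

		atomic_inc(&dq->inflight);
	}

	static void demo_done(struct rq_qos *rqos, struct request *rq)
	{
		struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

		atomic_dec(&dq->inflight);
	}

	static struct rq_qos_ops demo_ops = {
		.issue	= demo_issue,
		.done	= demo_done,
	};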
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_CGROUP:
		return "cgroup";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = cur->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
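rq_qos_wait() is the sleep/wake helper a throttling policy builds on: it parks the caller until the acquire callback succeeds in taking an inflight slot. A sketch of a caller, modeled loosely on blk-iolatency (the demo_* names are illustrative, not kernel code):

	struct demo_limit {
		struct rq_wait rqw;
		unsigned int max_inflight;
	};

	/* Atomically takes a slot iff the inflight count is below the limit. */
	static bool demo_acquire_inflight(struct rq_wait *rqw, void *private_data)
	{
		struct demo_limit *dl = private_data;

		return rq_wait_inc_below(rqw, dl->max_inflight);
	}

	static void demo_cleanup(struct rq_wait *rqw, void *private_data)
	{
		/* Undo any state set up while waiting, if needed. */
	}

	/* From the policy's ->throttle hook; may sleep: */
	rq_qos_wait(&dl->rqw, dl, demo_acquire_inflight, demo_cleanup);

The completion side then decrements rqw->inflight and calls wake_up(&rqw->wait) so the next waiter can retry.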
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

void rq_qos_exit(struct request_queue *);

#endif
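Finally, registration is just the list push that rq_qos_add() performs on q->rq_qos. Tying the hypothetical demo policy above together (illustrative only; compare wbt_init() for a real registrant):

	struct demo_qos *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

	if (!dq)
		return -ENOMEM;

	dq->rqos.id = RQ_QOS_WBT;	/* which slot this policy fills */
	dq->rqos.ops = &demo_ops;
	dq->rqos.q = q;
	rq_qos_add(q, &dq->rqos);	/* hooks start firing for this queue */

and teardown unhooks before freeing:

	rq_qos_del(q, &dq->rqos);
	kfree(dq);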