mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-05 11:06:40 +07:00
796d5116c4
Hi, Jens, If you recall, I posted an RFC patch for this back in July of last year: http://lkml.org/lkml/2010/7/13/279 The basic problem is that a process can issue a never-ending stream of async direct I/Os to the same sector on a device, thus starving out other I/O in the system (due to the way the alias handling works in both cfq and deadline). The solution I proposed back then was to start dispatching from the fifo after a certain number of aliases had been dispatched. Vivek asked why we had to treat aliases differently at all, and I never had a good answer. So, I put together a simple patch which allows aliases to be added to the rb tree (it adds them to the right, though that doesn't matter as the order isn't guaranteed anyway). I think this is the preferred solution, as it doesn't break up time slices in CFQ or batches in deadline. I've tested it, and it does solve the starvation issue. Let me know what you think. Cheers, Jeff Signed-off-by: Jeff Moyer <jmoyer@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
213 lines
6.8 KiB
C
213 lines
6.8 KiB
C
#ifndef _LINUX_ELEVATOR_H
|
|
#define _LINUX_ELEVATOR_H
|
|
|
|
#include <linux/percpu.h>
|
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
/*
 * Callback signatures an I/O scheduler implements.  Every hook takes the
 * request_queue it is attached to as its first argument.
 */

/* find a request the bio may merge with; returns an ELEVATOR_*_MERGE code
 * (defined below) and reports the candidate through the request ** */
typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
				 struct bio *);

/* two requests are being merged; invoked so the scheduler can drop the one
 * being retired from its internal structures */
typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);

/* a request grew by a merge; the int is the merge type (ELEVATOR_FRONT_MERGE
 * or ELEVATOR_BACK_MERGE, presumably — confirm against block core callers) */
typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);

/* veto hook for a proposed request/bio merge */
typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);

/* notification that a bio was merged into an existing request */
typedef void (elevator_bio_merged_fn) (struct request_queue *,
				       struct request *, struct bio *);

/* move request(s) from scheduler-internal queues onto the dispatch list */
typedef int (elevator_dispatch_fn) (struct request_queue *, int);

/* insert a new request into the scheduler */
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
/* walk to the request ordered immediately before/after the given one */
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
/* a request has completed */
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
/* may a new request be allocated? returns an ELV_MQUEUE_* value (see below) */
typedef int (elevator_may_queue_fn) (struct request_queue *, int);

/* allocate/release scheduler-private data attached to a request */
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
/* request handed to / pulled back from the driver (activate/deactivate) */
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);

/* create/destroy the per-queue scheduler instance; init returns the opaque
 * state later found in elevator_queue->elevator_data */
typedef void *(elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (struct elevator_queue *);
/*
 * Hook table a scheduler publishes through its elevator_type.  Individual
 * hooks appear optional (several schedulers differ in which they provide);
 * presumably the block core NULL-checks before calling — confirm in
 * block/elevator.c.
 */
struct elevator_ops
{
	/* merging */
	elevator_merge_fn *elevator_merge_fn;
	elevator_merged_fn *elevator_merged_fn;
	elevator_merge_req_fn *elevator_merge_req_fn;
	elevator_allow_merge_fn *elevator_allow_merge_fn;
	elevator_bio_merged_fn *elevator_bio_merged_fn;

	/* dispatch and queueing */
	elevator_dispatch_fn *elevator_dispatch_fn;
	elevator_add_req_fn *elevator_add_req_fn;
	elevator_activate_req_fn *elevator_activate_req_fn;
	elevator_deactivate_req_fn *elevator_deactivate_req_fn;

	elevator_completed_req_fn *elevator_completed_req_fn;

	/* neighbours of a request in the scheduler's ordering */
	elevator_request_list_fn *elevator_former_req_fn;
	elevator_request_list_fn *elevator_latter_req_fn;

	/* per-request private data lifetime */
	elevator_set_req_fn *elevator_set_req_fn;
	elevator_put_req_fn *elevator_put_req_fn;

	elevator_may_queue_fn *elevator_may_queue_fn;

	/* scheduler instance lifetime */
	elevator_init_fn *elevator_init_fn;
	elevator_exit_fn *elevator_exit_fn;
	/* NOTE(review): presumably drops scheduler state tied to an exiting
	 * io_context — confirm against cfq's implementation */
	void (*trim)(struct io_context *);
};
/* maximum length of a scheduler name (sizes elevator_type::elevator_name) */
#define ELV_NAME_MAX	(16)

/*
 * One sysfs attribute a scheduler exports; show/store operate on the
 * queue's elevator_queue instance.
 */
struct elv_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct elevator_queue *, char *);
	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
};
/*
 * identifies an elevator type, such as "deadline" or "cfq"; one of these
 * is registered per scheduler via elv_register()
 */
struct elevator_type
{
	struct list_head list;			/* link on the global list of registered types */
	struct elevator_ops ops;		/* this scheduler's hook table */
	struct elv_fs_entry *elevator_attrs;	/* sysfs attributes for this scheduler */
	char elevator_name[ELV_NAME_MAX];	/* name used for switching/selection */
	struct module *elevator_owner;		/* module providing the scheduler */
};
/*
 * each queue has an elevator_queue associated with it
 */
struct elevator_queue
{
	struct elevator_ops *ops;		/* hooks of the active scheduler */
	void *elevator_data;			/* scheduler-private state (from elevator_init_fn) */
	struct kobject kobj;			/* sysfs object for this instance */
	struct elevator_type *elevator_type;	/* type this instance was created from */
	struct mutex sysfs_lock;		/* NOTE(review): presumably serialises sysfs
						 * access against elevator switching — confirm */
	struct hlist_head *hash;		/* request hash; likely keyed for back-merge
						 * lookup — confirm in block/elevator.c */
	unsigned int registered:1;		/* set once registered with sysfs */
};
/*
 * block elevator interface
 */
/* insert a request into the dispatch list in sorted position */
extern void elv_dispatch_sort(struct request_queue *, struct request *);
/* append a request at the tail of the dispatch list */
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
/* add a request; the int selects an ELEVATOR_INSERT_* placement (below) */
extern void elv_add_request(struct request_queue *, struct request *, int);
extern void __elv_add_request(struct request_queue *, struct request *, int);
/* merge scanning: returns an ELEVATOR_*_MERGE code, candidate via request ** */
extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern int elv_try_merge(struct request *, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
			       struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
extern void elv_bio_merged(struct request_queue *q, struct request *,
			   struct bio *);
extern void elv_requeue_request(struct request_queue *, struct request *);
/* neighbours of a request in the scheduler's ordering */
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
/* register/unregister the active elevator's sysfs entries for a queue */
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
/* per-request scheduler-private data allocation/release */
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
/* push everything the scheduler still holds onto the dispatch list */
extern void elv_drain_elevator(struct request_queue *);
/*
 * io scheduler registration — called by each scheduler at module
 * init/exit with its elevator_type descriptor
 */
extern void elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
/*
 * io scheduler sysfs switching
 */
/* show/store handlers for the queue's "scheduler" sysfs attribute */
extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

/* attach an elevator to a queue (name selects a type; semantics of a NULL
 * name not visible here — presumably "use the default") / tear it down */
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
/* switch a live queue to the named scheduler */
extern int elevator_change(struct request_queue *, const char *);
/* basic request/bio merge admissibility check */
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
 * Helper functions.
 */
/* rb-tree based former/latter lookups, usable as elevator_former_req_fn /
 * elevator_latter_req_fn by rb-tree based schedulers */
extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);

/*
 * rb support functions: maintain a request rb tree keyed by sector
 * (elv_rb_find looks up by sector_t)
 */
extern void elv_rb_add(struct rb_root *, struct request *);
extern void elv_rb_del(struct rb_root *, struct request *);
extern struct request *elv_rb_find(struct rb_root *, sector_t);
/*
 * Return values from elevator merger
 */
#define ELEVATOR_NO_MERGE	0	/* bio cannot be merged */
#define ELEVATOR_FRONT_MERGE	1	/* bio goes in front of the request */
#define ELEVATOR_BACK_MERGE	2	/* bio goes behind the request */

/*
 * Insertion selection (third argument to elv_add_request)
 */
#define ELEVATOR_INSERT_FRONT	1	/* head of the dispatch list */
#define ELEVATOR_INSERT_BACK	2	/* tail of the dispatch list */
#define ELEVATOR_INSERT_SORT	3	/* hand to the scheduler for sorting */
#define ELEVATOR_INSERT_REQUEUE	4	/* re-insert a previously dispatched request */
#define ELEVATOR_INSERT_FLUSH	5	/* flush machinery insertion */
#define ELEVATOR_INSERT_SORT_MERGE	6	/* sort, attempting a merge first */
/*
 * return values from elevator_may_queue_fn
 */
enum {
	ELV_MQUEUE_MAY,		/* no objection to allocating a request */
	ELV_MQUEUE_NO,		/* do not allocate a request now */
	ELV_MQUEUE_MUST,	/* allocation must be allowed to proceed */
};
/* first sector past the end of the request */
#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
/* container_of for a request's rb_node inside an elevator rb tree */
#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)

/*
 * Hack to reuse the csd.list list_head as the fifo time holder while
 * the request is in the io scheduler. Saves an unsigned long in rq.
 */
#define rq_fifo_time(rq)	((unsigned long) (rq)->csd.list.next)
#define rq_set_fifo_time(rq,exp)	((rq)->csd.list.next = (void *) (exp))
/* container_of for a request linked on a scheduler fifo via queuelist */
#define rq_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)
/* take the request off the fifo and reinitialise the borrowed csd.list */
#define rq_fifo_clear(rq)	do {		\
	list_del_init(&(rq)->queuelist);	\
	INIT_LIST_HEAD(&(rq)->csd.list);	\
	} while (0)
/*
 * io context count accounting, backed by per-cpu counters
 */
#define elv_ioc_count_mod(name, __val)	this_cpu_add(name, __val)
#define elv_ioc_count_inc(name)	this_cpu_inc(name)
#define elv_ioc_count_dec(name)	this_cpu_dec(name)

/*
 * Sum the per-cpu counter over all possible CPUs; the result is a
 * snapshot, not an atomically-consistent total.
 * NOTE(review): smp_wmb() on a read path is unusual (an rmb would be the
 * expected pairing) — preserved as-is, confirm intent before changing.
 */
#define elv_ioc_count_read(name)				\
({								\
	unsigned long __val = 0;				\
	int __cpu;						\
	smp_wmb();						\
	for_each_possible_cpu(__cpu)				\
		__val += per_cpu(name, __cpu);			\
	__val;							\
})
#endif /* CONFIG_BLOCK */
|
|
#endif
|