#ifndef IOCONTEXT_H
#define IOCONTEXT_H
#include <linux/radix-tree.h>
/*
|
|
|
|
* This is the per-process anticipatory I/O scheduler state.
|
|
|
|
*/
|
|
|
|
struct as_io_context {
|
|
|
|
spinlock_t lock;
|
|
|
|
|
|
|
|
void (*dtor)(struct as_io_context *aic); /* destructor */
|
|
|
|
void (*exit)(struct as_io_context *aic); /* called on task exit */
|
|
|
|
|
|
|
|
unsigned long state;
|
|
|
|
atomic_t nr_queued; /* queued reads & sync writes */
|
|
|
|
atomic_t nr_dispatched; /* number of requests gone to the drivers */
|
|
|
|
|
|
|
|
/* IO History tracking */
|
|
|
|
/* Thinktime */
|
|
|
|
unsigned long last_end_request;
|
|
|
|
unsigned long ttime_total;
|
|
|
|
unsigned long ttime_samples;
|
|
|
|
unsigned long ttime_mean;
|
|
|
|
/* Layout pattern */
|
|
|
|
unsigned int seek_samples;
|
|
|
|
sector_t last_request_pos;
|
|
|
|
u64 seek_total;
|
|
|
|
sector_t seek_mean;
|
|
|
|
};
struct cfq_queue;
|
|
|
|
struct cfq_io_context {
|
|
|
|
void *key;
|
2008-01-24 14:44:49 +07:00
|
|
|
unsigned long dead_key;
|
2008-01-24 14:52:45 +07:00
|
|
|
|
|
|
|
struct cfq_queue *cfqq[2];
|
|
|
|
|
|
|
|
struct io_context *ioc;
|
|
|
|
|
|
|
|
unsigned long last_end_request;
|
|
|
|
sector_t last_request_pos;
|
|
|
|
|
|
|
|
unsigned long ttime_total;
|
|
|
|
unsigned long ttime_samples;
|
|
|
|
unsigned long ttime_mean;
|
|
|
|
|
|
|
|
unsigned int seek_samples;
|
|
|
|
u64 seek_total;
|
|
|
|
sector_t seek_mean;
|
|
|
|
|
|
|
|
struct list_head queue_list;
|
|
|
|
|
|
|
|
void (*dtor)(struct io_context *); /* destructor */
|
|
|
|
void (*exit)(struct io_context *); /* called on task exit */
|
|
|
|
};
/*
|
2008-01-24 14:53:35 +07:00
|
|
|
* I/O subsystem state of the associated processes. It is refcounted
|
|
|
|
* and kmalloc'ed. These could be shared between processes.
|
2008-01-24 14:52:45 +07:00
|
|
|
*/
|
|
|
|
struct io_context {
|
|
|
|
atomic_t refcount;
|
2008-01-24 14:53:35 +07:00
|
|
|
atomic_t nr_tasks;
|
|
|
|
|
|
|
|
/* all the fields below are protected by this lock */
|
|
|
|
spinlock_t lock;
|
2008-01-24 14:52:45 +07:00
|
|
|
|
|
|
|
unsigned short ioprio;
|
|
|
|
unsigned short ioprio_changed;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For request batching
|
|
|
|
*/
|
|
|
|
unsigned long last_waited; /* Time last woken after wait for request */
|
|
|
|
int nr_batch_requests; /* Number of requests left in the batch */
|
|
|
|
|
|
|
|
struct as_io_context *aic;
|
2008-01-24 14:44:49 +07:00
|
|
|
struct radix_tree_root radix_root;
|
2008-01-24 14:52:45 +07:00
|
|
|
void *ioc_data;
|
|
|
|
};
static inline struct io_context *ioc_task_link(struct io_context *ioc)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* if ref count is zero, don't allow sharing (ioc is going away, it's
|
|
|
|
* a race).
|
|
|
|
*/
|
|
|
|
if (ioc && atomic_inc_not_zero(&ioc->refcount))
|
|
|
|
return ioc;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
#endif
|