/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_pending,            /* On its way to being activated */
        BDI_wb_alloc,           /* Default embedded wb allocated */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_registered,         /* bdi_register() was done */
        BDI_writeback_running,  /* Writeback is in progress */
        BDI_unused,             /* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
        BDI_RECLAIMABLE,
        BDI_WRITEBACK,
        BDI_DIRTIED,
        BDI_WRITTEN,
        NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */
        unsigned int nr;

        unsigned long last_old_flush;   /* last old data flush */
        unsigned long last_active;      /* last time bdi thread was active */

        struct task_struct *task;       /* writeback thread */
        struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
        struct list_head b_dirty;       /* dirty inodes */
        struct list_head b_io;          /* parked for writeback */
        struct list_head b_more_io;     /* parked for more writeback */
        spinlock_t list_lock;           /* protects the b_* lists */
};

struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;    /* Always use atomic bitops on this */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */

        char *name;

        struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

        unsigned long bw_time_stamp;    /* last time write bw is updated */
        unsigned long dirtied_stamp;    /* pages dirtied at bw_time_stamp */
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
        unsigned long avg_write_bandwidth; /* further smoothed write bw */

        /*
         * The base dirty throttle rate, re-calculated every 200ms.
         * All the bdi tasks' dirty rate will be curbed under it.
         *
         * dirty_ratelimit aims to be (write_bw / N) when there are N
         * tasks dirtying pages on this bdi.  On each write() syscall,
         * balance_dirty_pages() throttles the dirtying task:
         *
         *      task_ratelimit = dirty_ratelimit * pos_ratio
         *      pause = pages_dirtied / task_ratelimit
         *
         * and every 200ms bdi_update_dirty_ratelimit() re-estimates:
         *
         *      dirty_ratelimit = task_ratelimit * write_bw / dirty_rate
         *
         * Rationale: balance_dirty_pages() must throttle dirtiers so
         * that the total number of dirty pages stays below the dirty
         * limit (avoiding memory deadlocks), and it should do so
         * fairly, i.e. in proportion to the pages each task dirties.
         * Matching the dirty rate to the writeout bandwidth yields a
         * stable number of dirty pages:
         *
         *      dirty_rate == write_bw                          (1)
         *
         * and the fairness requirement gives:
         *
         *      task_ratelimit == write_bw / N                  (2)
         *
         * where N is the number of dirtier tasks.  N is not known
         * beforehand, but can be worked around: start by throttling
         * every task at some rate
         *
         *      task_ratelimit = task_ratelimit_0               (3)
         *
         * (any non-zero initial value is OK).  After 200ms, measure:
         *
         *      dirty_rate = pages dirtied by all tasks / 200ms
         *      write_bw   = pages written to disk / 200ms
         *
         * For aggressive dirtiers the equality holds:
         *
         *      dirty_rate == N * task_ratelimit_0              (4)
         * or
         *      task_ratelimit_0 == dirty_rate / N              (5)
         *
         * so the balanced rate can be estimated as:
         *
         *      balanced_dirty_ratelimit =
         *              task_ratelimit_0 * write_bw / dirty_rate (6)
         *
         * Substituting (5) into (6) recovers write_bw / N, i.e. the
         * desired equality (1).
         *
         * Matching the rates alone only yields a stable dirty count;
         * to keep that count high enough for performance yet below the
         * hard limit, (2) is extended with a position feedback factor:
         *
         *      task_ratelimit = balanced_dirty_ratelimit * pos_ratio (7)
         *
         * where pos_ratio is a negative feedback function satisfying
         * f(setpoint) == 1.0 and df/dx < 0.  When dirty pages are above
         * the setpoint, each task is throttled a bit harder than the
         * balanced rate, so pages are dirtied more slowly than they are
         * cleaned and the count drops back to the setpoint (and the
         * reverse below it).
         *
         * Assuming both dirty_ratelimit and pos_ratio stayed constant
         * over the past 200ms, (7) gives:
         *
         *      task_ratelimit_0 = balanced_dirty_ratelimit * pos_ratio (8)
         *
         * and putting (8) into (6) yields the update actually used by
         * bdi_update_dirty_ratelimit():
         *
         *      balanced_dirty_ratelimit *=
         *              pos_ratio * write_bw / dirty_rate       (9)
         *
         * An illustrative sketch of this update follows the struct
         * definition below.
         */
        unsigned long dirty_ratelimit;

        struct prop_local_percpu completions;
        int dirty_exceeded;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        struct bdi_writeback wb;  /* default writeback info for this bdi */
        spinlock_t wb_lock;       /* protects work_list */

        struct list_head work_list;

        struct device *dev;

        struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};
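
/*
 * Illustrative sketch of update (9) from the dirty_ratelimit comment
 * above -- not the in-tree implementation, which lives in
 * mm/page-writeback.c and adds further filtering and limiting.  The
 * fixed-point scaling of pos_ratio by 1024 is an assumption made for
 * this sketch.
 */
#if 0
static void sketch_update_dirty_ratelimit(struct backing_dev_info *bdi,
                                          unsigned long write_bw,
                                          unsigned long dirty_rate,
                                          unsigned long pos_ratio)
{
        /* (8): the rate tasks were actually throttled at these 200ms */
        unsigned long task_ratelimit =
                        bdi->dirty_ratelimit * pos_ratio >> 10;

        /* (9): scale by the measured write_bw / dirty_rate ratio */
        if (dirty_rate)
                bdi->dirty_ratelimit = task_ratelimit * write_bw / dirty_rate;
}
#endif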

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
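
/*
 * Usage sketch for a filesystem without a real backing device;
 * "examplefs" and both helper names are made up for illustration.
 */
#if 0
static struct backing_dev_info examplefs_bdi;

static int examplefs_bdi_init(void)
{
        /* initializes the bdi and registers it as "examplefs-<seq>" */
        return bdi_setup_and_register(&examplefs_bdi, "examplefs",
                                      BDI_CAP_MAP_COPY);
}

static void examplefs_bdi_exit(void)
{
        /* bdi_destroy() also unregisters a still-registered bdi */
        bdi_destroy(&examplefs_bdi);
}
#endif
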
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
        return !list_empty(&wb->b_dirty) ||
               !list_empty(&wb->b_io) ||
               !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
{
        __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __bdi_stat_sum(bdi, item);
        local_irq_restore(flags);

        return sum;
}
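
/*
 * Usage sketch (the callers are hypothetical): the __-prefixed variants
 * assume interrupts are already disabled, e.g. under a
 * spin_lock_irqsave()'d mapping->tree_lock, while the plain variants do
 * the irq save/restore themselves.
 */
#if 0
static void sketch_account_dirtied(struct backing_dev_info *bdi)
{
        /* irqs already off here, so the cheap variants are safe */
        __inc_bdi_stat(bdi, BDI_RECLAIMABLE);
        __inc_bdi_stat(bdi, BDI_DIRTIED);
}

static s64 sketch_reclaimable(struct backing_dev_info *bdi)
{
        /* fast approximate read; may be off by up to bdi_stat_error() */
        return bdi_stat(bdi, BDI_RECLAIMABLE);
}
#endif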

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * BDI_STAT_BATCH;
#else
        return 1;
#endif
}
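
/*
 * Sketch of the usual pattern around bdi_stat_error() (hypothetical
 * caller): trust the cheap approximate counter while it is more than
 * the maximal error away from the threshold, and pay for the exact
 * per-cpu sum only near the boundary.
 */
#if 0
static bool sketch_over_thresh(struct backing_dev_info *bdi, s64 thresh)
{
        if (bdi_stat(bdi, BDI_RECLAIMABLE) <= thresh - bdi_stat_error(bdi))
                return false;   /* certainly below the threshold */
        return bdi_stat_sum(bdi, BDI_RECLAIMABLE) > thresh;
}
#endif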

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems:
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_MAP_COPY        0x00000004
#define BDI_CAP_MAP_DIRECT      0x00000008
#define BDI_CAP_READ_MAP        0x00000010
#define BDI_CAP_WRITE_MAP       0x00000020
#define BDI_CAP_EXEC_MAP        0x00000040
#define BDI_CAP_NO_ACCT_WB      0x00000080
#define BDI_CAP_SWAP_BACKED     0x00000100

#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
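
/*
 * Sketch of a capability combination for a ramfs-like, memory-only
 * filesystem (the variable name is illustrative): no dirty accounting
 * or writeback, but full private/shared mmap support.
 */
#if 0
static struct backing_dev_info sketch_ramfs_bdi = {
        .name           = "sketch-ramfs",
        .ra_pages       = 0,    /* no readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK |
                          BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
                          BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
                          BDI_CAP_EXEC_MAP,
};
#endif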

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_sync_congested) |
                                  (1 << BDI_async_congested));
}

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
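
/*
 * Sketch of the classic congestion backoff loop (hypothetical caller):
 * while the async queue is backed up, sleep until writeback makes
 * progress on the bdi or the timeout expires.
 */
#if 0
static void sketch_backoff_on_congestion(struct backing_dev_info *bdi)
{
        while (bdi_write_congested(bdi))
                congestion_wait(BLK_RW_ASYNC, HZ / 10);
}
#endif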

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
        return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
        return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */