Diffstat (limited to 'include/linux/writeback.h')
 -rw-r--r--  include/linux/writeback.h | 55
 1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 17e7ccc322a5..f1bfa12ea246 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,9 +7,39 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
-struct backing_dev_info;
+/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
+ * The 1/16 region above the global dirty limit will be put to maximum pauses:
+ *
+ *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
+ *
+ * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
+ * to loops:
+ *
+ *	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
+ *
+ * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
+ * time) for the dirty pages to drop, unless written enough pages.
+ *
+ * The global dirty threshold is normally equal to the global dirty limit,
+ * except when the system suddenly allocates a lot of anonymous memory and
+ * knocks down the global dirty threshold quickly, in which case the global
+ * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
+ */
+#define DIRTY_SCOPE		8
+#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
+#define DIRTY_MAXPAUSE_AREA	16
+#define DIRTY_PASSGOOD_AREA	8
 
-extern spinlock_t inode_wb_list_lock;
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+
+struct backing_dev_info;
 
 /*
  * fs/fs-writeback.c
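The region arithmetic introduced above is easy to sanity-check in userspace. A minimal standalone sketch (the 1000 MB figure is an arbitrary example, not a kernel default):

	#include <stdio.h>

	#define DIRTY_SCOPE		8
	#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
	#define DIRTY_MAXPAUSE_AREA	16
	#define DIRTY_PASSGOOD_AREA	8

	int main(void)
	{
		unsigned long thresh = 1000, limit = 1000;	/* MB, example values */

		/* 1/4 of thresh below thresh: smooth throttling */
		printf("throttle:  (%lu, %lu)\n",
		       thresh - thresh / DIRTY_FULL_SCOPE, thresh);	/* (750, 1000) */
		/* 1/16 of limit above limit: maximum pauses */
		printf("max-pause: (%lu, %lu)\n",
		       limit, limit + limit / DIRTY_MAXPAUSE_AREA);	/* (1000, 1062) */
		/* the next 1/16: dirty-exceeded bdi's loop here */
		printf("pass-good: (%lu, %lu)\n",
		       limit + limit / DIRTY_MAXPAUSE_AREA,
		       limit + limit / DIRTY_PASSGOOD_AREA);		/* (1062, 1125) */
		return 0;
	}

MIN_WRITEBACK_PAGES works out the same way: with 4 KB pages, PAGE_CACHE_SHIFT is 12, so 4096UL >> (12 - 10) = 1024 pages, i.e. the 4 MB the comment promises.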
@@ -26,11 +56,6 @@ enum writeback_sync_modes {
  */
 struct writeback_control {
 	enum writeback_sync_modes sync_mode;
-	unsigned long *older_than_this;	/* If !NULL, only write back inodes
-					   older than this */
-	unsigned long wb_start;		/* Time writeback_inodes_wb was
-					   called. This is needed to avoid
-					   extra jobs and livelock */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -43,13 +68,11 @@ struct writeback_control {
 	loff_t range_start;
 	loff_t range_end;
 
-	unsigned nonblocking:1;		/* Don't get stuck on request queues */
-	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
+	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
-	unsigned more_io:1;		/* more io to be dispatched */
 };
 
 /*
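The new tagged_writepages bit lets a writeback pass use the tagged page-cache scan without full WB_SYNC_ALL semantics. Roughly how write_cache_pages() consumes it, as a condensed sketch of the mm/page-writeback.c logic rather than the verbatim code:

	int tag;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	if (tag == PAGECACHE_TAG_TOWRITE)
		/* re-tag the currently dirty pages once, up front */
		tag_pages_for_writeback(mapping, index, end);

	/*
	 * The scan then only visits pages carrying "tag", so pages dirtied
	 * after the snapshot cannot keep re-entering this pass: that is
	 * the livelock the field comment refers to.
	 */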
@@ -62,8 +85,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
 int writeback_inodes_sb_if_idle(struct super_block *);
 int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
 void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wb(struct bdi_writeback *wb,
-		struct writeback_control *wbc);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
 
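The writeback_inodes_wb() conversion replaces the caller-built control structure with a plain page budget and returns the progress made. Call sites move along these lines (an illustrative before/after, not an exact call site from the patch):

	/* before: wrap the budget in a throwaway writeback_control */
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 1024,
	};
	writeback_inodes_wb(wb, &wbc);
	pages_written = 1024 - wbc.nr_to_write;

	/* after: pass the budget directly, read progress off the return value */
	pages_written = writeback_inodes_wb(wb, 1024);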
@@ -94,6 +116,8 @@ static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 
+extern unsigned long global_dirty_limit;
+
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
 extern unsigned long dirty_background_bytes;
@@ -128,6 +152,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 			      unsigned long dirty);
 
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+			    unsigned long thresh,
+			    unsigned long dirty,
+			    unsigned long bdi_thresh,
+			    unsigned long bdi_dirty,
+			    unsigned long start_time);
+
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied);
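For context on where these hooks sit: filesystems call the ratelimited helper whenever they dirty pages, and balance_dirty_pages() in turn feeds the thresh/dirty/bdi_thresh/bdi_dirty numbers into __bdi_update_bandwidth() to keep the per-bdi write bandwidth estimate current. A hedged sketch of the caller side (example_write_end is a hypothetical filesystem hook, not from this patch):

	/*
	 * Sketch: after dirtying pages, a filesystem throttles itself so a
	 * heavy dirtier cannot outrun writeback indefinitely.
	 */
	static int example_write_end(struct address_space *mapping,
				     unsigned long nr_dirtied)
	{
		/* ... nr_dirtied pages of "mapping" were just marked dirty ... */
		balance_dirty_pages_ratelimited_nr(mapping, nr_dirtied);
		return 0;
	}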