Diffstat (limited to 'include/linux/writeback.h')
 include/linux/writeback.h | 44 ++++++++++++++++++++++++++++++------------
 1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 17e7ccc322a5..2b8963ff0f35 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,9 +7,28 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
-struct backing_dev_info;
+/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
+ * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
+ * time) for the dirty pages to drop, unless written enough pages.
+ *
+ * The global dirty threshold is normally equal to the global dirty limit,
+ * except when the system suddenly allocates a lot of anonymous memory and
+ * knocks down the global dirty threshold quickly, in which case the global
+ * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
+ */
+#define DIRTY_SCOPE		8
+#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
 
-extern spinlock_t inode_wb_list_lock;
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+
+struct backing_dev_info;
 
 /*
  * fs/fs-writeback.c
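
Two of the new constants repay a quick check. The sketch below is a standalone
userspace program, not part of the patch; it assumes 4 KB pages
(PAGE_CACHE_SHIFT == 12) and an arbitrary example threshold to work through
the arithmetic:

#include <stdio.h>

/* Mirrors of the patch's definitions; PAGE_CACHE_SHIFT == 12 is assumed. */
#define PAGE_CACHE_SHIFT	12
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)

int main(void)
{
	unsigned long thresh = 51200;	/* example threshold: 51200 pages = 200 MB */

	/* 4096UL is 4 MB expressed in KB; shifting by (PAGE_CACHE_SHIFT - 10)
	 * converts KB to pages, so the chunk is 4 MB for any page size:
	 * 4096 >> 2 = 1024 pages = 4 MB with 4 KB pages. */
	printf("min write chunk: %lu pages\n", MIN_WRITEBACK_PAGES);

	/* The smooth-throttling region is the top 1/DIRTY_FULL_SCOPE = 1/4
	 * of the threshold: (thresh - thresh/4, thresh). */
	printf("throttle region: (%lu, %lu) pages\n",
	       thresh - thresh / DIRTY_FULL_SCOPE, thresh);
	return 0;
}

With these inputs it prints a 1024-page (4 MB) chunk and a (38400, 51200)
throttling region, i.e. the top quarter below the threshold.
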
@@ -26,11 +45,6 @@ enum writeback_sync_modes
  */
 struct writeback_control {
 	enum writeback_sync_modes sync_mode;
-	unsigned long *older_than_this;	/* If !NULL, only write back inodes
-					   older than this */
-	unsigned long wb_start;		/* Time writeback_inodes_wb was
-					   called. This is needed to avoid
-					   extra jobs and livelock */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -43,13 +57,11 @@ struct writeback_control
 	loff_t range_start;
 	loff_t range_end;
 
-	unsigned nonblocking:1;		/* Don't get stuck on request queues */
-	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
+	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
-	unsigned more_io:1;		/* more io to be dispatched */
 };
 
 /*
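
The new tagged_writepages bit names the tag-and-write pattern: snapshot the
set of pages that are dirty before the sweep starts, then write only those,
so a task that keeps redirtying pages during the sweep cannot livelock it (in
the kernel this is done by tagging radix-tree slots PAGECACHE_TAG_TOWRITE via
tag_pages_for_writeback() in write_cache_pages()). A toy userspace model of
the idea, with an array standing in for the page cache:

#include <stdio.h>
#include <stdbool.h>

#define NR_PAGES 8

static bool dirty[NR_PAGES];	/* which pages are dirty */
static bool towrite[NR_PAGES];	/* snapshot taken before the sweep */

/* Analogue of tag_pages_for_writeback(): remember what is dirty *now*. */
static void tag_for_writeback(void)
{
	for (int i = 0; i < NR_PAGES; i++)
		towrite[i] = dirty[i];
}

int main(void)
{
	int written = 0;

	dirty[0] = dirty[1] = dirty[2] = true;
	tag_for_writeback();

	for (int i = 0; i < NR_PAGES; i++) {
		if (!towrite[i])
			continue;
		dirty[i] = false;	/* "write" the page */
		towrite[i] = false;
		written++;
		dirty[7] = true;	/* concurrent dirtier strikes mid-sweep */
	}
	/* Page 7 was dirtied after tagging, so it waits for the next pass
	 * instead of extending this one forever. */
	printf("wrote %d pages; page 7 deferred: %s\n",
	       written, dirty[7] ? "yes" : "no");
	return 0;
}
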
@@ -62,8 +74,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
 int writeback_inodes_sb_if_idle(struct super_block *);
 int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
 void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wb(struct bdi_writeback *wb,
-		struct writeback_control *wbc);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
 
@@ -94,6 +105,8 @@ static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 
+extern unsigned long global_dirty_limit;
+
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
 extern unsigned long dirty_background_bytes;
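
The comment added in the first hunk says the global dirty limit "will follow
down slowly" when the threshold is suddenly knocked down. One plausible shape
for such tracking is an exponential decay toward the new threshold; the toy
below uses an assumed 1/32 step and a floor at the current dirty count, and
is a sketch of the idea rather than the kernel's exact logic:

#include <stdio.h>

static unsigned long limit;	/* toy stand-in for global_dirty_limit */

static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
	if (limit < thresh) {		/* raising the limit is immediate */
		limit = thresh;
		return;
	}
	if (thresh < dirty)		/* don't chase below what is dirty */
		thresh = dirty;
	if (limit > thresh)
		limit -= (limit - thresh) >> 5;	/* follow down slowly */
}

int main(void)
{
	update_dirty_limit(1000, 0);		/* steady state: limit = 1000 */
	for (int i = 1; i <= 3; i++) {
		update_dirty_limit(200, 600);	/* threshold knocked down */
		printf("limit after step %d: %lu\n", i, limit);
	}
	return 0;
}

Each step shaves only about 3% off the gap (988, 976, 965, ...), so dirtier
tasks already throttled against the old limit are not suddenly pushed deep
over a collapsed limit and livelocked all at once.
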
@@ -128,6 +141,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 			      unsigned long dirty);
 
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+			    unsigned long thresh,
+			    unsigned long dirty,
+			    unsigned long bdi_thresh,
+			    unsigned long bdi_dirty,
+			    unsigned long start_time);
+
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied);
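
The __bdi_update_bandwidth() declaration suggests a periodically sampled,
smoothed estimate of each device's write bandwidth (it is handed the current
dirty/threshold numbers plus a start_time). As a rough illustration only,
here is a userspace toy of that kind of estimator; the 7:1 smoothing weight
and the field names are assumptions of this sketch, not taken from the patch:

#include <stdio.h>

struct bdi_model {
	unsigned long written;		/* pages written so far */
	unsigned long written_stamp;	/* value of 'written' at last sample */
	unsigned long write_bandwidth;	/* smoothed pages per sample interval */
};

/* Sample the written counter and fold the delta into a running average. */
static void update_bandwidth(struct bdi_model *bdi)
{
	unsigned long delta = bdi->written - bdi->written_stamp;

	/* weighted average: 7 parts history, 1 part newest sample */
	bdi->write_bandwidth = (bdi->write_bandwidth * 7 + delta) / 8;
	bdi->written_stamp = bdi->written;
}

int main(void)
{
	struct bdi_model bdi = { .write_bandwidth = 1024 };

	for (int i = 1; i <= 4; i++) {
		bdi.written += 2048;	/* pretend 2048 pages hit the disk */
		update_bandwidth(&bdi);
		printf("sample %d: %lu pages/interval\n", i, bdi.write_bandwidth);
	}
	return 0;
}

The estimate converges toward the true rate (1152, 1264, 1362, 1447, ...)
without jumping on any single noisy sample, which is the property a
throttling loop needs when pacing dirtiers against the device.
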
