author     Andrew Morton <akpm@osdl.org>           2006-03-24 06:18:10 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-24 10:33:26 -0500
commit     fa5a734e406b53761fcc5ee22366006f71112c2d
tree       003a238b9207e38f747bfb119a30fb52f1cd5ae9
parent     8a14342683b1e3adcf5f78660a42fcbd95b44a35
[PATCH] balance_dirty_pages_ratelimited: take nr_pages arg
Modify balance_dirty_pages_ratelimited() so that it can take an argument giving
the number of pages the caller has just dirtied. This is needed for msync().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
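
For context, a minimal sketch of how a caller that has just dirtied a whole
batch of pages might use the new entry point. This is not the msync() change
itself (that lands in a separate patch); dirty_range() and its loop are purely
illustrative:

/*
 * Hypothetical caller: dirty a run of pages, then report the whole
 * batch to the writeback throttle in a single call instead of once
 * per page.
 */
static void dirty_range(struct address_space *mapping,
			pgoff_t first, pgoff_t last)
{
	unsigned long nr_dirtied = 0;
	pgoff_t index;

	for (index = first; index <= last; index++) {
		/* ... mark the page at 'index' dirty ... */
		nr_dirtied++;
	}

	/* One ratelimit check covering the whole batch. */
	balance_dirty_pages_ratelimited_nr(mapping, nr_dirtied);
}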
 include/linux/writeback.h | 10
 mm/page-writeback.c       | 24
 2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 609565961494..56f92fcbe94a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -99,7 +99,15 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
 			void __user *, size_t *, loff_t *);
 
 void page_writeback_init(void);
-void balance_dirty_pages_ratelimited(struct address_space *mapping);
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied);
+
+static inline void
+balance_dirty_pages_ratelimited(struct address_space *mapping)
+{
+	balance_dirty_pages_ratelimited_nr(mapping, 1);
+}
+
 int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int sync_page_range(struct inode *inode, struct address_space *mapping,
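
Existing callers are unaffected: the old name survives as the inline wrapper
above. The two entry points relate at call sites roughly as follows (the
surrounding call sites are illustrative, not part of this patch):

	/*
	 * Old-style caller: one page dirtied, one call.  The wrapper
	 * forwards this as balance_dirty_pages_ratelimited_nr(mapping, 1).
	 */
	balance_dirty_pages_ratelimited(mapping);

	/* New-style caller: report several freshly dirtied pages at once. */
	balance_dirty_pages_ratelimited_nr(mapping, nr_pages_dirtied);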
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee79f01..c67ddc464721 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -256,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
@@ -268,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(int, ratelimits) = 0;
-	long ratelimit;
+	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+	unsigned long ratelimit;
+	unsigned long *p;
 
 	ratelimit = ratelimit_pages;
 	if (dirty_exceeded)
@@ -281,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * Check the rate limiting. Also, we do not want to throttle real-time
 	 * tasks in balance_dirty_pages(). Period.
 	 */
-	if (get_cpu_var(ratelimits)++ >= ratelimit) {
-		__get_cpu_var(ratelimits) = 0;
-		put_cpu_var(ratelimits);
+	preempt_disable();
+	p = &__get_cpu_var(ratelimits);
+	*p += nr_pages_dirtied;
+	if (unlikely(*p >= ratelimit)) {
+		*p = 0;
+		preempt_enable();
 		balance_dirty_pages(mapping);
 		return;
 	}
-	put_cpu_var(ratelimits);
+	preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
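
The throttling policy itself is unchanged; what moves is the bookkeeping: the
per-CPU counter now advances by nr_pages_dirtied instead of by one, and
explicit preempt_disable()/preempt_enable() replace get_cpu_var()/put_cpu_var()
because the counter is updated through a pointer. Below is a standalone sketch
of that accounting, with the per-CPU and preemption details stripped out and a
made-up threshold of 32 standing in for the runtime-computed ratelimit_pages:

#include <stdio.h>

static unsigned long counter;			/* models the per-CPU 'ratelimits' */
static const unsigned long ratelimit = 32;	/* stand-in for ratelimit_pages */

static void ratelimited(unsigned long nr_pages_dirtied)
{
	counter += nr_pages_dirtied;
	if (counter >= ratelimit) {
		counter = 0;
		printf("would call balance_dirty_pages() (batch of %lu)\n",
		       nr_pages_dirtied);
	}
}

int main(void)
{
	int i;

	/* 40 single-page calls: the balance pass fires once, at call 32. */
	for (i = 0; i < 40; i++)
		ratelimited(1);

	/* One 40-page batch: the balance pass fires immediately. */
	ratelimited(40);
	return 0;
}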