author     Andrew Morton <akpm@osdl.org>            2006-03-24 06:18:10 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-03-24 10:33:26 -0500
commit     fa5a734e406b53761fcc5ee22366006f71112c2d
tree       003a238b9207e38f747bfb119a30fb52f1cd5ae9 /mm/page-writeback.c
parent     8a14342683b1e3adcf5f78660a42fcbd95b44a35
[PATCH] balance_dirty_pages_ratelimited: take nr_pages arg
Modify balance_dirty_pages_ratelimited() so that it can take a
number-of-pages-which-I-just-dirtied argument. For msync().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
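
The new argument lets a caller that has just dirtied several pages (the msync() path being the motivating case) charge them all to the dirty-throttling ratelimit in one call instead of once per page. The include/linux/writeback.h side of the change is not shown here (the diffstat below is limited to mm/page-writeback.c); as a minimal sketch only, assuming the header keeps the old single-page entry point as a thin wrapper around the new _nr variant, it could look like:

/*
 * Sketch only -- assumes the header keeps the old entry point as a
 * static inline wrapper; not part of the mm/page-writeback.c diff below.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);

static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}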
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--   mm/page-writeback.c   24
1 files changed, 15 insertions, 9 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee79f01..c67ddc464721 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -256,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
@@ -268,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(int, ratelimits) = 0;
-	long ratelimit;
+	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+	unsigned long ratelimit;
+	unsigned long *p;
 
 	ratelimit = ratelimit_pages;
 	if (dirty_exceeded)
@@ -281,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * Check the rate limiting. Also, we do not want to throttle real-time
 	 * tasks in balance_dirty_pages(). Period.
 	 */
-	if (get_cpu_var(ratelimits)++ >= ratelimit) {
-		__get_cpu_var(ratelimits) = 0;
-		put_cpu_var(ratelimits);
+	preempt_disable();
+	p = &__get_cpu_var(ratelimits);
+	*p += nr_pages_dirtied;
+	if (unlikely(*p >= ratelimit)) {
+		*p = 0;
+		preempt_enable();
 		balance_dirty_pages(mapping);
 		return;
 	}
-	put_cpu_var(ratelimits);
+	preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
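
For illustration, a caller that dirties a batch of pages against one mapping can then do the throttling check once for the whole batch; the helper below is a hypothetical sketch (example_dirty_range and its arguments are illustrative, not from this patch):

/*
 * Hypothetical caller sketch, not from this patch: mark a batch of
 * pages dirty, then charge all of them to the per-CPU ratelimit
 * counter in one call so it advances by nr rather than by one.
 */
static void example_dirty_range(struct address_space *mapping,
				struct page **pages, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		set_page_dirty(pages[i]);

	balance_dirty_pages_ratelimited_nr(mapping, nr);
}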