Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee79f01..c67ddc464721 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -256,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
@@ -268,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(int, ratelimits) = 0;
-	long ratelimit;
+	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+	unsigned long ratelimit;
+	unsigned long *p;
 
 	ratelimit = ratelimit_pages;
 	if (dirty_exceeded)
@@ -281,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * Check the rate limiting.  Also, we do not want to throttle real-time
 	 * tasks in balance_dirty_pages().  Period.
 	 */
-	if (get_cpu_var(ratelimits)++ >= ratelimit) {
-		__get_cpu_var(ratelimits) = 0;
-		put_cpu_var(ratelimits);
+	preempt_disable();
+	p = &__get_cpu_var(ratelimits);
+	*p += nr_pages_dirtied;
+	if (unlikely(*p >= ratelimit)) {
+		*p = 0;
+		preempt_enable();
 		balance_dirty_pages(mapping);
 		return;
 	}
-	put_cpu_var(ratelimits);
+	preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
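
For context: existing single-page callers do not have to adopt the new name. The
companion change lives in include/linux/writeback.h (not part of this diff) and
keeps the old entry point as a thin inline wrapper; roughly:

	/* include/linux/writeback.h -- companion change, sketched, not in this diff */
	void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
						unsigned long nr_pages_dirtied);

	static inline void
	balance_dirty_pages_ratelimited(struct address_space *mapping)
	{
		/* single-page callers keep the old name and charge one page */
		balance_dirty_pages_ratelimited_nr(mapping, 1);
	}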
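
The point of the new nr_pages_dirtied argument is batching: a write path that
dirties several pages in one go can charge the whole batch against the per-CPU
ratelimit in a single call instead of looping over the per-page helper. A
hypothetical caller (example_dirty_batch and its arguments are illustrative,
not part of the patch):

	/* Hypothetical example, not part of this patch. */
	static void example_dirty_batch(struct address_space *mapping,
					struct page **pages, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++)
			set_page_dirty(pages[i]);	/* mark each page dirty */

		/* account all nr pages against the per-CPU ratelimit at once */
		balance_dirty_pages_ratelimited_nr(mapping, nr);
	}

To support this, the per-CPU counter now accumulates nr_pages_dirtied rather
than incrementing by one, using an explicit preempt_disable()/preempt_enable()
pair instead of get_cpu_var()/put_cpu_var(); as before, preemption is re-enabled
before calling balance_dirty_pages(), which can block.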