path: root/mm/page-writeback.c
author	Wu Fengguang <fengguang.wu@intel.com>	2010-08-11 17:17:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-12 11:43:29 -0400
commit	16c4042f08919f447d6b2a55679546c9b97c7264 (patch)
tree	0248b64d46237854ebe67efe8c742cb5878d8611 /mm/page-writeback.c
parent	e50e37201ae2e7d6a52e87815759e6481f0bcfb9 (diff)
writeback: avoid unnecessary calculation of bdi dirty thresholds
Split get_dirty_limits() into global_dirty_limits() + bdi_dirty_limit(), so
that the latter can be avoided when under the global dirty background
threshold (which is the normal state for most systems).

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
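Before the diff, a minimal userspace sketch of the reordered logic in balance_dirty_pages(): the cheap global check now runs before any per-bdi math. The threshold values, page counts, and the fixed 1/4 bdi share below are made-up illustration values, not kernel defaults.

/*
 * Minimal userspace model of the new call order; all numbers here
 * are hypothetical, chosen only to exercise both branches.
 */
#include <stdio.h>

static void global_dirty_limits(unsigned long *pbackground,
				unsigned long *pdirty)
{
	*pbackground = 100;	/* hypothetical background threshold, in pages */
	*pdirty = 200;		/* hypothetical hard dirty threshold */
}

/* stand-in for bdi_dirty_limit(): a fixed 1/4 share instead of the
 * kernel's proportional writeout fraction */
static unsigned long bdi_dirty_limit(unsigned long dirty)
{
	return dirty / 4;
}

int main(void)
{
	unsigned long background_thresh, dirty_thresh, bdi_thresh;
	unsigned long nr_reclaimable = 40, nr_writeback = 20;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	/* under (background + dirty) / 2: bail out before doing any
	 * per-bdi work -- the common case this patch optimizes for */
	if (nr_reclaimable + nr_writeback <
			(background_thresh + dirty_thresh) / 2) {
		puts("under global threshold: bdi limit never computed");
		return 0;
	}

	/* only past the global check is the per-bdi limit computed */
	bdi_thresh = bdi_dirty_limit(dirty_thresh);
	printf("bdi_thresh = %lu pages\n", bdi_thresh);
	return 0;
}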
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	75
1 file changed, 38 insertions(+), 37 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cf69a5e46e6..1ea13ef350a8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -267,10 +267,11 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  *   dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+				      unsigned long bdi_dirty)
 {
 	long numerator, denominator;
-	unsigned long dirty = *pdirty;
+	unsigned long dirty = bdi_dirty;
 	u64 inv = dirty >> 3;
 
 	task_dirties_fraction(tsk, &numerator, &denominator);
@@ -278,10 +279,8 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
 	do_div(inv, denominator);
 
 	dirty -= inv;
-	if (dirty < *pdirty/2)
-		dirty = *pdirty/2;
 
-	*pdirty = dirty;
+	return max(dirty, bdi_dirty/2);
 }
 
 /*
@@ -391,9 +390,7 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
@@ -425,26 +422,28 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
 	}
 	*pbackground = background;
 	*pdirty = dirty;
+}
 
-	if (bdi) {
-		u64 bdi_dirty;
-		long numerator, denominator;
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+			      unsigned long dirty)
+{
+	u64 bdi_dirty;
+	long numerator, denominator;
 
-		/*
-		 * Calculate this BDI's share of the dirty ratio.
-		 */
-		bdi_writeout_fraction(bdi, &numerator, &denominator);
+	/*
+	 * Calculate this BDI's share of the dirty ratio.
+	 */
+	bdi_writeout_fraction(bdi, &numerator, &denominator);
 
-		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-		bdi_dirty *= numerator;
-		do_div(bdi_dirty, denominator);
-		bdi_dirty += (dirty * bdi->min_ratio) / 100;
-		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-			bdi_dirty = dirty * bdi->max_ratio / 100;
+	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+	bdi_dirty *= numerator;
+	do_div(bdi_dirty, denominator);
 
-		*pbdi_dirty = bdi_dirty;
-		task_dirty_limit(current, pbdi_dirty);
-	}
+	bdi_dirty += (dirty * bdi->min_ratio) / 100;
+	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+		bdi_dirty = dirty * bdi->max_ratio / 100;
+
+	return bdi_dirty;
 }
 
 /*
@@ -475,13 +474,24 @@ static void balance_dirty_pages(struct address_space *mapping,
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&background_thresh, &dirty_thresh,
-				 &bdi_thresh, bdi);
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
 
+		global_dirty_limits(&background_thresh, &dirty_thresh);
+
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
+		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+		bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
 		/*
 		 * In order to avoid the stacked BDI deadlock we need
 		 * to ensure we accurately count the 'dirty' pages when
@@ -513,15 +523,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 		if (!dirty_exceeded)
 			break;
 
-		/*
-		 * Throttle it only when the background writeback cannot
-		 * catch-up. This avoids (excessively) small writeouts
-		 * when the bdi limits are ramping up.
-		 */
-		if (nr_reclaimable + nr_writeback <
-				(background_thresh + dirty_thresh) / 2)
-			break;
-
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
 
@@ -634,7 +635,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 	unsigned long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+		global_dirty_limits(&background_thresh, &dirty_thresh);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
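For completeness, the per-bdi share arithmetic in the new bdi_dirty_limit() can be sanity-checked with a plain userspace model: do_div() is replaced by ordinary 64-bit division, and every ratio below is a hypothetical example value, not a kernel default.

#include <stdio.h>
#include <stdint.h>

/* userspace model of bdi_dirty_limit(); the numerator/denominator
 * pair stands in for what bdi_writeout_fraction() would return */
static unsigned long model_bdi_dirty_limit(unsigned long dirty,
		long numerator, long denominator,
		unsigned bdi_min_ratio, unsigned min_ratio, unsigned max_ratio)
{
	uint64_t bdi_dirty;

	/* this bdi's proportional share of the non-reserved global limit */
	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	bdi_dirty /= denominator;	/* do_div() in the kernel */

	/* then clamp between the bdi's guaranteed and maximum shares */
	bdi_dirty += (dirty * min_ratio) / 100;
	if (bdi_dirty > (dirty * max_ratio) / 100)
		bdi_dirty = dirty * max_ratio / 100;

	return bdi_dirty;
}

int main(void)
{
	/* e.g. dirty = 1000 pages and a bdi that did 1/4 of recent
	 * writeout: 1000 * 100/100 * 1/4 = 250 pages */
	printf("%lu\n", model_bdi_dirty_limit(1000, 1, 4, 0, 0, 100));
	return 0;
}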