aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
authorWu Fengguang <fengguang.wu@intel.com>2010-08-11 17:17:40 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-12 11:43:30 -0400
commit1babe18385d3976043c04237ce837f3736197eb4 (patch)
treec766bb0022ec5188cd7e991fc1f9ad51687e8aca /mm/page-writeback.c
parent16c4042f08919f447d6b2a55679546c9b97c7264 (diff)
writeback: add comment to the dirty limit functions
Document global_dirty_limits() and bdi_dirty_limit(). Signed-off-by: Wu Fengguang <fengguang.wu@intel.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--mm/page-writeback.c31
1 files changed, 28 insertions, 3 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1ea13ef350a8..20890d80c7ef 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -261,11 +261,18 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
261} 261}
262 262
263/* 263/*
264 * scale the dirty limit 264 * task_dirty_limit - scale down dirty throttling threshold for one task
265 * 265 *
266 * task specific dirty limit: 266 * task specific dirty limit:
267 * 267 *
268 * dirty -= (dirty/8) * p_{t} 268 * dirty -= (dirty/8) * p_{t}
269 *
270 * To protect light/slow dirtying tasks from heavier/fast ones, we start
271 * throttling individual tasks before reaching the bdi dirty limit.
272 * Relatively low thresholds will be allocated to heavy dirtiers. So when
273 * dirty pages grow large, heavy dirtiers will be throttled first, which will
274 * effectively curb the growth of dirty pages. Light dirtiers with high enough
275 * dirty threshold may never get throttled.
269 */ 276 */
270static unsigned long task_dirty_limit(struct task_struct *tsk, 277static unsigned long task_dirty_limit(struct task_struct *tsk,
271 unsigned long bdi_dirty) 278 unsigned long bdi_dirty)
@@ -390,6 +397,15 @@ unsigned long determine_dirtyable_memory(void)
390 return x + 1; /* Ensure that we never return 0 */ 397 return x + 1; /* Ensure that we never return 0 */
391} 398}
392 399
400/**
401 * global_dirty_limits - background-writeback and dirty-throttling thresholds
402 *
403 * Calculate the dirty thresholds based on sysctl parameters
404 * - vm.dirty_background_ratio or vm.dirty_background_bytes
405 * - vm.dirty_ratio or vm.dirty_bytes
406 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
407 * real-time tasks.
408 */
393void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) 409void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
394{ 410{
395 unsigned long background; 411 unsigned long background;
@@ -424,8 +440,17 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
424 *pdirty = dirty; 440 *pdirty = dirty;
425} 441}
426 442
427unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, 443/**
428 unsigned long dirty) 444 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
445 *
446 * Allocate high/low dirty limits to fast/slow devices, in order to prevent
447 * - starving fast devices
448 * - piling up dirty pages (that will take long time to sync) on slow devices
449 *
450 * The bdi's share of dirty limit will be adapting to its throughput and
451 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
452 */
453unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
429{ 454{
430 u64 bdi_dirty; 455 u64 bdi_dirty;
431 long numerator, denominator; 456 long numerator, denominator;