about summary refs log tree commit diff stats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
authorJohannes Weiner <jweiner@redhat.com>2012-01-10 18:07:44 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-10 19:30:43 -0500
commitccafa2879fb8d13b8031337a8743eac4189e5d6e (patch)
tree0202fd26218faba5751de0906c430f422b0ccbac /mm/page-writeback.c
parentab8fabd46f811d5153d8a0cd2fac9a0d41fb593d (diff)
mm: writeback: cleanups in preparation for per-zone dirty limits
The next patch will introduce per-zone dirty limiting functions in addition to the traditional global dirty limiting. Rename determine_dirtyable_memory() to global_dirtyable_memory() before adding the zone-specific version, and fix up its documentation. Also, move the functions to determine the dirtyable memory and the function to calculate the dirty limit based on that together so that their relationship is more apparent and that they can be commented on as a group. Signed-off-by: Johannes Weiner <jweiner@redhat.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Acked-by: Mel Gorman <mel@suse.de> Reviewed-by: Michal Hocko <mhocko@suse.cz> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: Dave Chinner <david@fromorbit.com> Cc: Jan Kara <jack@suse.cz> Cc: Shaohua Li <shaohua.li@intel.com> Cc: Rik van Riel <riel@redhat.com> Cc: Chris Mason <chris.mason@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--mm/page-writeback.c93
1 file changed, 47 insertions, 46 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 9ab6de82d8e6..433fa990fe8b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -146,6 +146,7 @@ static struct prop_descriptor vm_completions;
146 * We make sure that the background writeout level is below the adjusted 146 * We make sure that the background writeout level is below the adjusted
147 * clamping level. 147 * clamping level.
148 */ 148 */
149
149static unsigned long highmem_dirtyable_memory(unsigned long total) 150static unsigned long highmem_dirtyable_memory(unsigned long total)
150{ 151{
151#ifdef CONFIG_HIGHMEM 152#ifdef CONFIG_HIGHMEM
@@ -172,12 +173,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
172} 173}
173 174
174/** 175/**
175 * determine_dirtyable_memory - amount of memory that may be used 176 * global_dirtyable_memory - number of globally dirtyable pages
176 * 177 *
177 * Returns the numebr of pages that can currently be freed and used 178 * Returns the global number of pages potentially available for dirty
178 * by the kernel for direct mappings. 179 * page cache. This is the base value for the global dirty limits.
179 */ 180 */
180static unsigned long determine_dirtyable_memory(void) 181unsigned long global_dirtyable_memory(void)
181{ 182{
182 unsigned long x; 183 unsigned long x;
183 184
@@ -191,6 +192,47 @@ static unsigned long determine_dirtyable_memory(void)
191} 192}
192 193
193/* 194/*
195 * global_dirty_limits - background-writeback and dirty-throttling thresholds
196 *
197 * Calculate the dirty thresholds based on sysctl parameters
198 * - vm.dirty_background_ratio or vm.dirty_background_bytes
199 * - vm.dirty_ratio or vm.dirty_bytes
200 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
201 * real-time tasks.
202 */
203void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
204{
205 unsigned long background;
206 unsigned long dirty;
207 unsigned long uninitialized_var(available_memory);
208 struct task_struct *tsk;
209
210 if (!vm_dirty_bytes || !dirty_background_bytes)
211 available_memory = global_dirtyable_memory();
212
213 if (vm_dirty_bytes)
214 dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
215 else
216 dirty = (vm_dirty_ratio * available_memory) / 100;
217
218 if (dirty_background_bytes)
219 background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
220 else
221 background = (dirty_background_ratio * available_memory) / 100;
222
223 if (background >= dirty)
224 background = dirty / 2;
225 tsk = current;
226 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
227 background += background / 4;
228 dirty += dirty / 4;
229 }
230 *pbackground = background;
231 *pdirty = dirty;
232 trace_global_dirty_state(background, dirty);
233}
234
235/*
194 * couple the period to the dirty_ratio: 236 * couple the period to the dirty_ratio:
195 * 237 *
196 * period/2 ~ roundup_pow_of_two(dirty limit) 238 * period/2 ~ roundup_pow_of_two(dirty limit)
@@ -202,7 +244,7 @@ static int calc_period_shift(void)
202 if (vm_dirty_bytes) 244 if (vm_dirty_bytes)
203 dirty_total = vm_dirty_bytes / PAGE_SIZE; 245 dirty_total = vm_dirty_bytes / PAGE_SIZE;
204 else 246 else
205 dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 247 dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
206 100; 248 100;
207 return 2 + ilog2(dirty_total - 1); 249 return 2 + ilog2(dirty_total - 1);
208} 250}
@@ -362,47 +404,6 @@ static unsigned long hard_dirty_limit(unsigned long thresh)
362 return max(thresh, global_dirty_limit); 404 return max(thresh, global_dirty_limit);
363} 405}
364 406
365/*
366 * global_dirty_limits - background-writeback and dirty-throttling thresholds
367 *
368 * Calculate the dirty thresholds based on sysctl parameters
369 * - vm.dirty_background_ratio or vm.dirty_background_bytes
370 * - vm.dirty_ratio or vm.dirty_bytes
371 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
372 * real-time tasks.
373 */
374void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
375{
376 unsigned long background;
377 unsigned long dirty;
378 unsigned long uninitialized_var(available_memory);
379 struct task_struct *tsk;
380
381 if (!vm_dirty_bytes || !dirty_background_bytes)
382 available_memory = determine_dirtyable_memory();
383
384 if (vm_dirty_bytes)
385 dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
386 else
387 dirty = (vm_dirty_ratio * available_memory) / 100;
388
389 if (dirty_background_bytes)
390 background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
391 else
392 background = (dirty_background_ratio * available_memory) / 100;
393
394 if (background >= dirty)
395 background = dirty / 2;
396 tsk = current;
397 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
398 background += background / 4;
399 dirty += dirty / 4;
400 }
401 *pbackground = background;
402 *pdirty = dirty;
403 trace_global_dirty_state(background, dirty);
404}
405
406/** 407/**
407 * bdi_dirty_limit - @bdi's share of dirty throttling threshold 408 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
408 * @bdi: the backing_dev_info to query 409 * @bdi: the backing_dev_info to query