author	Johannes Weiner <hannes@cmpxchg.org>	2012-01-10 18:06:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 19:30:41 -0500
commit	1edf223485c42c99655dcd001db1e46ad5e5d2d7 (patch)
tree	33b93dc8f2a249806150b5792ac1787688bf6b74 /mm/page-writeback.c
parent	e4e11180dfa545233e5145919b75b7fac88638df (diff)
mm/page-writeback.c: make determine_dirtyable_memory static again
The tracing ring-buffer used this function briefly, but not anymore. Make it local to the writeback code again.

Also, move the function so that no forward declaration needs to be reintroduced.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
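The pattern is the standard C one: a function needed only inside a single translation unit is marked static, and defining it above its first caller removes the need for a separate forward declaration. A minimal sketch of the idea, using hypothetical names rather than the actual writeback code:

#include <stdio.h>

/*
 * Toy example with hypothetical names. Defined before its only caller,
 * so no forward declaration is needed; "static" keeps the symbol private
 * to this file, which is the property the patch restores for
 * determine_dirtyable_memory().
 */
static unsigned long count_widgets(void)
{
	return 3;
}

int main(void)
{
	printf("%lu widgets\n", count_widgets());
	return 0;
}

Had the helper been defined below main() instead, a "static unsigned long count_widgets(void);" line would have to come first; that is the forward declaration the commit message avoids reintroducing by moving the function.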
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	122
1 file changed, 60 insertions, 62 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8616ef3025a4..c081bf62202b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -130,6 +130,66 @@ unsigned long global_dirty_limit;
 static struct prop_descriptor vm_completions;
 
 /*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around. To avoid stressing page reclaim with lots of unreclaimable
+ * pages. It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_node_state(node, N_HIGH_MEMORY) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
+/*
  * couple the period to the dirty_ratio:
  *
  * period/2 ~ roundup_pow_of_two(dirty limit)
@@ -196,7 +256,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
@@ -291,67 +350,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around. To avoid stressing page reclaim with lots of unreclaimable
- * pages. It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-	int node;
-	unsigned long x = 0;
-
-	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
-	}
-	/*
-	 * Make sure that the number of highmem pages is never larger
-	 * than the number of the total dirtyable memory. This can only
-	 * occur in very strange VM situations but we want to make sure
-	 * that this does not occur.
-	 */
-	return min(x, total);
-#else
-	return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the number of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-	unsigned long x;
-
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-	if (!vm_highmem_is_dirtyable)
-		x -= highmem_dirtyable_memory(x);
-
-	return x + 1;	/* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 					   unsigned long bg_thresh)
 {
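For readers skimming only the moved hunk: determine_dirtyable_memory() sums free and reclaimable pages, subtracts the highmem share unless vm_highmem_is_dirtyable is set, and adds one so the result is never zero. A toy userspace rendering of that arithmetic, with made-up page counts and hypothetical names (this is not kernel code):

#include <stdio.h>

/* Hypothetical page counts standing in for the kernel's global counters. */
static unsigned long free_pages = 100000;
static unsigned long reclaimable_pages = 50000;
static unsigned long highmem_pages = 30000;	/* free + reclaimable highmem */
static int vm_highmem_is_dirtyable;		/* 0, as with the sysctl default */

/* Simplified analogue of determine_dirtyable_memory(). */
static unsigned long toy_dirtyable_memory(void)
{
	unsigned long x = free_pages + reclaimable_pages;
	unsigned long hi = highmem_pages;

	if (!vm_highmem_is_dirtyable) {
		if (hi > x)	/* clamp, like min(x, total) in the highmem helper */
			hi = x;
		x -= hi;
	}
	return x + 1;		/* ensure we never return 0 */
}

int main(void)
{
	/* 100000 + 50000 - 30000 + 1 = 120001 dirtyable pages */
	printf("dirtyable pages: %lu\n", toy_dirtyable_memory());
	return 0;
}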