aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2011-01-13 18:45:43 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:31 -0500
commitb44129b30652c8771db2265939bb8b463724043d (patch)
treed5b669ff4faea020b03e894706f49d5d1ae56907 /mm
parent88f5acf88ae6a9778f6d25d0d5d7ec2d57764a97 (diff)
mm: vmstat: use a single setter function and callback for adjusting percpu thresholds
reduce_pgdat_percpu_threshold() and restore_pgdat_percpu_threshold() exist to adjust the per-cpu vmstat thresholds while kswapd is awake to avoid errors due to counter drift. The functions duplicate some code so this patch replaces them with a single set_pgdat_percpu_threshold() that takes a callback function to calculate the desired threshold as a parameter.

[akpm@linux-foundation.org: readability tweak]
[kosaki.motohiro@jp.fujitsu.com: set_pgdat_percpu_threshold(): don't use for_each_online_cpu]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c19
-rw-r--r--mm/vmstat.c36
2 files changed, 24 insertions, 31 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5da4295e7d67..86f8c3418795 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2448,9 +2448,24 @@ static int kswapd(void *p)
 			 */
 			if (!sleeping_prematurely(pgdat, order, remaining)) {
 				trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
-				restore_pgdat_percpu_threshold(pgdat);
+
+				/*
+				 * vmstat counters are not perfectly
+				 * accurate and the estimated value
+				 * for counters such as NR_FREE_PAGES
+				 * can deviate from the true value by
+				 * nr_online_cpus * threshold. To
+				 * avoid the zone watermarks being
+				 * breached while under pressure, we
+				 * reduce the per-cpu vmstat threshold
+				 * while kswapd is awake and restore
+				 * them before going back to sleep.
+				 */
+				set_pgdat_percpu_threshold(pgdat,
+						calculate_normal_threshold);
 				schedule();
-				reduce_pgdat_percpu_threshold(pgdat);
+				set_pgdat_percpu_threshold(pgdat,
+						calculate_pressure_threshold);
 			} else {
 				if (remaining)
 					count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bc0f095791b4..751a65e00aac 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
 
-static int calculate_pressure_threshold(struct zone *zone)
+int calculate_pressure_threshold(struct zone *zone)
 {
 	int threshold;
 	int watermark_distance;
@@ -107,7 +107,7 @@ static int calculate_pressure_threshold(struct zone *zone)
 	return threshold;
 }
 
-static int calculate_threshold(struct zone *zone)
+int calculate_normal_threshold(struct zone *zone)
 {
 	int threshold;
 	int mem;	/* memory in 128 MB units */
@@ -166,7 +166,7 @@ static void refresh_zone_stat_thresholds(void)
 	for_each_populated_zone(zone) {
 		unsigned long max_drift, tolerate_drift;
 
-		threshold = calculate_threshold(zone);
+		threshold = calculate_normal_threshold(zone);
 
 		for_each_online_cpu(cpu)
 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
@@ -185,46 +185,24 @@ static void refresh_zone_stat_thresholds(void)
 	}
 }
 
-void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+				int (*calculate_pressure)(struct zone *))
 {
 	struct zone *zone;
 	int cpu;
 	int threshold;
 	int i;
 
-	get_online_cpus();
-	for (i = 0; i < pgdat->nr_zones; i++) {
-		zone = &pgdat->node_zones[i];
-		if (!zone->percpu_drift_mark)
-			continue;
-
-		threshold = calculate_pressure_threshold(zone);
-		for_each_online_cpu(cpu)
-			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
-					= threshold;
-	}
-	put_online_cpus();
-}
-
-void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
-{
-	struct zone *zone;
-	int cpu;
-	int threshold;
-	int i;
-
-	get_online_cpus();
 	for (i = 0; i < pgdat->nr_zones; i++) {
 		zone = &pgdat->node_zones[i];
 		if (!zone->percpu_drift_mark)
 			continue;
 
-		threshold = calculate_threshold(zone);
-		for_each_online_cpu(cpu)
+		threshold = (*calculate_pressure)(zone);
+		for_each_possible_cpu(cpu)
 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
 					= threshold;
 	}
-	put_online_cpus();
 }
 
 /*