author	Christoph Lameter <clameter@sgi.com>	2006-09-01 00:27:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-01 14:39:08 -0400
commit	a302eb4e4602d6444ae75a0e516fb2f2c62d6642 (patch)
tree	8f477d121e8bd31611b1ae3c3658f5c71684f788
parent	b63fe1ba4409774738c971d4e6f0b12b54cc2c65 (diff)
[PATCH] ZVC: Overstep counters
Increments and decrements are usually grouped rather than mixed.  We can
optimize the inc and dec functions for that case.

Increment and decrement the counters by 50% more than the threshold in
those cases and set the differential accordingly.  This decreases the
need to update the atomic counters.

The idea came originally from Andrew Morton.  The overstepping alone was
sufficient to address the contention issue found when updating the
global and the per zone counters from 160 processors.

Also remove some code in dec_zone_page_state.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
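Why the fold preserves correctness: if the per-cpu differential is p when the
threshold is crossed, the patch folds p + STAT_THRESHOLD/2 into the shared
counter and leaves the differential at -STAT_THRESHOLD/2, so the sum of the
shared and local values is unchanged while the next fold is pushed roughly
1.5 thresholds of grouped increments away.  A minimal userspace sketch of the
pattern, assuming an illustrative THRESHOLD of 32 and C11 atomics in place of
the kernel's per-zone machinery (inc_count, dec_count, global_count and diff
are made-up names, not the patched code):

#include <stdatomic.h>

#define THRESHOLD 32                    /* stand-in for STAT_THRESHOLD */

static atomic_long global_count;        /* shared counter, costly to update */
static _Thread_local long diff;         /* per-thread differential, cheap */

static void inc_count(void)
{
        diff++;
        if (diff > THRESHOLD) {
                /* Fold 50% more than the threshold; global_count + diff
                 * is unchanged because diff drops by the folded amount. */
                atomic_fetch_add(&global_count, diff + THRESHOLD / 2);
                diff = -THRESHOLD / 2;
        }
}

static void dec_count(void)
{
        diff--;
        if (diff < -THRESHOLD) {
                /* Mirror image for runs of decrements. */
                atomic_fetch_add(&global_count, diff - THRESHOLD / 2);
                diff = THRESHOLD / 2;
        }
}

Between folds, a run of increments now carries diff from -16 up to 33, i.e.
49 local updates per atomic update instead of 33.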
-rw-r--r--	mm/vmstat.c	20
1 file changed, 5 insertions(+), 15 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index dfdf24133901..3799a0f7543a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -190,8 +190,8 @@ static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	(*p)++;
 
 	if (unlikely(*p > STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
+		zone_page_state_add(*p + STAT_THRESHOLD / 2, zone, item);
+		*p = -STAT_THRESHOLD / 2;
 	}
 }
 
@@ -209,8 +209,8 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 	(*p)--;
 
 	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
+		zone_page_state_add(*p - STAT_THRESHOLD / 2, zone, item);
+		*p = STAT_THRESHOLD / 2;
 	}
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
@@ -239,19 +239,9 @@ EXPORT_SYMBOL(inc_zone_page_state);
 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
-	struct zone *zone;
-	s8 *p;
 
-	zone = page_zone(page);
 	local_irq_save(flags);
-	p = diff_pointer(zone, item);
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
+	__dec_zone_page_state(page, item);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(dec_zone_page_state);
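As a rough check of the contention claim, the following hypothetical driver
counts how often the shared counter would be touched over 1000 grouped
increments, comparing the old reset-to-zero behaviour against the overstep
reset; the threshold of 32 is assumed for illustration only:

#include <stdio.h>

int main(void)
{
        long local = 0, folds_plain = 0, folds_overstep = 0;

        for (int i = 0; i < 1000; i++)          /* old behaviour: *p = 0 */
                if (++local > 32) { folds_plain++; local = 0; }

        local = 0;
        for (int i = 0; i < 1000; i++)          /* patched: *p = -THRESHOLD/2 */
                if (++local > 32) { folds_overstep++; local = -16; }

        /* Plain resets fold every 33 increments (30 folds per 1000);
         * overstep folds every 49 after the first (20 folds), about a
         * third fewer updates to the shared atomic counter. */
        printf("plain=%ld overstep=%ld\n", folds_plain, folds_overstep);
        return 0;
}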