author	Cody P Schafer <cody@linux.vnet.ibm.com>	2013-07-03 18:01:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 19:07:27 -0400
commit	0a647f3811d6af56405a819341ceac23e31d4572 (patch)
tree	ba96d8341d2de9379d5acbb799cb6da6b5a16b41 /mm/page_alloc.c
parent	998d39cb236fe464af86a3492a24d2f67ee1efc2 (diff)
mm/page_alloc: convert zone_pcp_update() to rely on memory barriers instead of stop_machine()
zone_pcp_update()'s goal is to adjust the ->high and ->batch members of a percpu pageset based on a zone's ->managed_pages. We don't need to drain the entire percpu pageset just to modify these fields. This lets us avoid calling setup_pageset() (and the draining required to call it) and instead allows simply setting the fields' values (with some attention paid to memory barriers to prevent the relationship between ->batch and ->high from being thrown off).

This does change the behavior of zone_pcp_update(): the percpu pagesets are no longer drained when zone_pcp_update() is called (they end up being shrunk, not completely drained, later, when a 0-order page is freed in free_hot_cold_page()).

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
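The barrier ordering the message alludes to lives in the writer-side helper that pageset_set_batch() (seen in the diff below) ends up calling. A minimal sketch in C, modeled on the pageset_update() helper introduced earlier in this patch series (not the verbatim kernel source):

/*
 * Update ->high and ->batch so that a concurrent, lockless reader
 * never pairs a stale, large ->batch with a new, smaller ->high.
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
			   unsigned long batch)
{
	/* start from a fail-safe batch that no reader can overshoot with */
	pcp->batch = 1;
	smp_wmb();

	/* publish the new high before the real batch */
	pcp->high = high;
	smp_wmb();

	pcp->batch = batch;
}

A reader that loads ->high before ->batch therefore observes either the fail-safe value 1 or a batch consistent with the high it just read.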
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	33
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97b8f861e63d..8125263be60f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6085,33 +6085,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit __zone_pcp_update(void *data)
-{
-	struct zone *zone = data;
-	int cpu;
-	unsigned long batch = zone_batchsize(zone), flags;
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pset;
-		struct per_cpu_pages *pcp;
-
-		pset = per_cpu_ptr(zone->pageset, cpu);
-		pcp = &pset->pcp;
-
-		local_irq_save(flags);
-		if (pcp->count > 0)
-			free_pcppages_bulk(zone, pcp->count, pcp);
-		drain_zonestat(zone, pset);
-		setup_pageset(pset, batch);
-		local_irq_restore(flags);
-	}
-	return 0;
-}
-
+/*
+ * The zone indicated has a new number of managed_pages; batch sizes and percpu
+ * page high values need to be recalculated.
+ */
 void __meminit zone_pcp_update(struct zone *zone)
 {
+	unsigned cpu;
+	unsigned long batch;
 	mutex_lock(&pcp_batch_high_lock);
-	stop_machine(__zone_pcp_update, zone, NULL);
+	batch = zone_batchsize(zone);
+	for_each_possible_cpu(cpu)
+		pageset_set_batch(per_cpu_ptr(zone->pageset, cpu), batch);
 	mutex_unlock(&pcp_batch_high_lock);
 }
 #endif