| author | Cody P Schafer <cody@linux.vnet.ibm.com> | 2013-07-03 18:01:41 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-03 19:07:27 -0400 |
| commit | 169f6c1999ca6d0c5e06e8d810817ed3d1ebf017 (patch) | |
| tree | 7b09525d53302021a32ce8f3a6671ee3428be614 /mm/page_alloc.c | |
| parent | 3664033c56f211a3dcf28d9d68c604ed447d8d79 (diff) | |
mm/page_alloc: don't re-init pageset in zone_pcp_update()
When memory hotplug is triggered, we call pageset_init() on per-cpu
pagesets that both contain pages and are in use, causing both the
leakage of those pages and (potentially) bad behaviour if a page is
allocated from a pageset while it is being cleared.
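For context, pageset_init() of this era zeroes the whole per_cpu_pageset and re-creates empty free lists, so it assumes the pageset is not live. A paraphrased sketch (not the verbatim kernel source; see mm/page_alloc.c for the real body) of why re-running it is harmful:

```c
/* Paraphrased sketch of pageset_init() circa v3.10. */
static void pageset_init(struct per_cpu_pageset *p)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));	/* wipes ->count, ->high, ->batch, vm stats */

	pcp = &p->pcp;
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		/* Re-initializing the list heads orphans any pages still
		 * queued on them (they leak), and a CPU allocating from
		 * the list concurrently can see a half-cleared structure. */
		INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
```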
Avoid this by factoring out pageset_set_high_and_batch() (which contains
all the logic needed to set a pageset's ->high and ->batch irrespective
of system state) from zone_pageset_init(), and calling the new
pageset_set_high_and_batch() instead of zone_pageset_init() from
zone_pcp_update().
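Consolidated, the post-patch shape looks like this (assembled from the diff below; comments are editorial):

```c
/* One-time initialization: only for pagesets that are not yet live. */
static void __meminit zone_pageset_init(struct zone *zone, int cpu)
{
	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

	pageset_init(pcp);			/* empty lists, zeroed counts */
	pageset_set_high_and_batch(zone, pcp);	/* tunables only */
}

/* Hotplug path: retune ->high/->batch without touching the lists,
 * so pages already queued on a live pageset stay reachable. */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
```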
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 17 |
1 file changed, 12 insertions(+), 5 deletions(-)
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03a3f943d98e..fab9506273be 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4104,11 +4104,9 @@ static void pageset_set_high(struct per_cpu_pageset *p,
 	pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit zone_pageset_init(struct zone *zone, int cpu)
+static void __meminit pageset_set_high_and_batch(struct zone *zone,
+		struct per_cpu_pageset *pcp)
 {
-	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
-
-	pageset_init(pcp);
 	if (percpu_pagelist_fraction)
 		pageset_set_high(pcp,
 			(zone->managed_pages /
@@ -4117,6 +4115,14 @@ static void __meminit zone_pageset_init(struct zone *zone, int cpu)
 		pageset_set_batch(pcp, zone_batchsize(zone));
 }
 
+static void __meminit zone_pageset_init(struct zone *zone, int cpu)
+{
+	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+
+	pageset_init(pcp);
+	pageset_set_high_and_batch(zone, pcp);
+}
+
 static void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
@@ -6100,7 +6106,8 @@ void __meminit zone_pcp_update(struct zone *zone)
 	unsigned cpu;
 	mutex_lock(&pcp_batch_high_lock);
 	for_each_possible_cpu(cpu)
-		zone_pageset_init(zone, cpu);
+		pageset_set_high_and_batch(zone,
+				per_cpu_ptr(zone->pageset, cpu));
 	mutex_unlock(&pcp_batch_high_lock);
 }
 #endif
```