author		Tejun Heo <tj@kernel.org>	2012-04-27 11:42:53 -0400
committer	Luis Henriques <luis.henriques@canonical.com>	2012-05-25 12:24:47 -0400
commit		33e74f387676ebb12b2aab8efa98e2af7a8d2c28 (patch)
tree		5260d0cb14dcf7b2c3599ff6e02f4ff7f8bdcf9c
parent		a5ef3b2f52f3f965417bafb57a0f68cf078e7211 (diff)
percpu: pcpu_embed_first_chunk() should free unused parts after all allocs are complete
BugLink: http://bugs.launchpad.net/bugs/1002880

commit 42b64281453249dac52861f9b97d18552a7ec62b upstream.

pcpu_embed_first_chunk() allocates memory for each node, copies percpu
data and frees unused portions of it before proceeding to the next
group.  This assumes that allocations for different nodes don't
overlap; however, depending on memory topology, the bootmem allocator
may end up allocating memory from a different node than the requested
one, which may overlap with the portion freed from one of the previous
percpu areas.  This leads to percpu groups for different nodes
overlapping, which is a serious bug.

This patch separates out copy & partial free from the allocation loop
such that all allocations are complete before partial frees happen.

This also fixes overlapping frees which could happen on the allocation
failure path - the out_free_areas path frees whole groups, but the
groups could have portions freed at that point.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: "Pavel V. Panteleev" <pp_84@mail.ru>
Tested-by: "Pavel V. Panteleev" <pp_84@mail.ru>
LKML-Reference: <E1SNhwY-0007ui-V7.pp_84-mail-ru@f220.mail.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
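[Editor's illustration, not part of the patch: a minimal standalone C
sketch of the ordering the fix enforces.  A toy bump allocator stands in
for bootmem, and alloc_fn, free_fn, NR_GROUPS, GROUP_SIZE and USED_SIZE
are hypothetical names invented for the example.]

#include <stdio.h>
#include <stddef.h>

#define NR_GROUPS  2
#define GROUP_SIZE 256
#define USED_SIZE  128

/* Toy bump allocator (an assumption for this sketch): like the boot
 * allocator, it may hand a just-freed range right back to the next
 * caller, which is what made interleaved alloc/free dangerous. */
static char arena[4096];
static size_t arena_top;

static void *alloc_fn(size_t size)
{
	void *p = &arena[arena_top];
	arena_top += size;
	return p;
}

static void free_fn(void *ptr, size_t size)
{
	/* Reclaim only a block that is the current top of the arena. */
	if ((char *)ptr + size == &arena[arena_top])
		arena_top -= size;
}

int main(void)
{
	void *areas[NR_GROUPS];
	int g;

	/* Phase 1: complete every group's allocation first. */
	for (g = 0; g < NR_GROUPS; g++)
		areas[g] = alloc_fn(GROUP_SIZE);

	/* Phase 2: only now copy data (omitted here) and return each
	 * unused tail.  Freeing inside the loop above would let
	 * alloc_fn() reuse a freed tail for the next group, so two
	 * groups could overlap - the bug the patch fixes. */
	for (g = 0; g < NR_GROUPS; g++)
		free_fn((char *)areas[g] + USED_SIZE,
			GROUP_SIZE - USED_SIZE);

	for (g = 0; g < NR_GROUPS; g++)
		printf("group %d at %p\n", g, areas[g]);
	return 0;
}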
-rw-r--r--	mm/percpu.c	| 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 0ae7a09141e..af0cc7a58f9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1630,6 +1630,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts. This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];
 
 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {