author	Tejun Heo <tj@kernel.org>	2014-08-15 16:06:06 -0400
committer	Tejun Heo <tj@kernel.org>	2014-08-15 16:06:06 -0400
commit	f0d279654dea22b7a6ad34b9334aee80cda62cde (patch)
tree	a4f81f1c04ffced7ac5a5630487d27d575a181c8
parent	c9d26423e56ce1ab4d786f92aebecf859d419293 (diff)
percpu: fix pcpu_alloc_pages() failure path
When pcpu_alloc_pages() fails midway, pcpu_free_pages() is invoked to free what has already been allocated. The invocation covers the whole requested range, and pcpu_free_pages() will try to free every non-NULL page in it. Unfortunately, this is incorrect: pcpu_get_pages_and_bitmap(), contrary to what its comment suggests, does not clear the pages array, so the array may still hold entries from previous invocations, making the partial failure path free the wrong pages.

Fix it by open-coding the partial freeing of the already allocated pages.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: stable@vger.kernel.org
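The same unwind pattern, reduced to a standalone userspace sketch (the names alloc_range() and NSLOTS, the malloc()-backed slots, and the slot size are made up for illustration; this is not the kernel code): on a mid-loop failure, walk back over only the slots this call filled, instead of trusting every non-NULL entry in an array that callers are allowed to leave stale.

/*
 * Minimal userspace sketch of the unwind-only-your-own-work pattern.
 * alloc_range() and NSLOTS are hypothetical; the point is the err path.
 */
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 8

/* Try to fill slots [start, end); on failure, undo only our own work. */
static int alloc_range(void *slots[], int start, int end)
{
	int i;

	for (i = start; i < end; i++) {
		slots[i] = malloc(64);
		if (!slots[i])
			goto err;
	}
	return 0;

err:
	/* Walk back over the slots we just populated, nothing more. */
	while (--i >= start)
		free(slots[i]);
	return -1;
}

int main(void)
{
	void *slots[NSLOTS] = { NULL };	/* callers may leave stale entries */

	if (alloc_range(slots, 2, 6) == 0) {
		/* ... use the range, then release it ... */
		for (int i = 2; i < 6; i++)
			free(slots[i]);
	}
	return 0;
}

Freeing in reverse from the failing index is the same shape as the "while (--i >= page_start)" loop in the patch below, which additionally unwinds the fully populated earlier CPUs.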
-rw-r--r--	mm/percpu-vm.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4cd..8d9bb2c00c68 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    int page_start, int page_end)
 {
 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-	unsigned int cpu;
+	unsigned int cpu, tcpu;
 	int i;
 
 	for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-			if (!*pagep) {
-				pcpu_free_pages(chunk, pages, populated,
-						page_start, page_end);
-				return -ENOMEM;
-			}
+			if (!*pagep)
+				goto err;
 		}
 	}
 	return 0;
+
+err:
+	while (--i >= page_start)
+		__free_page(pages[pcpu_page_idx(cpu, i)]);
+
+	for_each_possible_cpu(tcpu) {
+		if (tcpu == cpu)
+			break;
+		for (i = page_start; i < page_end; i++)
+			__free_page(pages[pcpu_page_idx(tcpu, i)]);
+	}
+	return -ENOMEM;
 }
 
 /**