aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMax Filippov <jcmvbkbc@gmail.com>2013-01-11 17:31:52 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-01-11 17:54:55 -0500
commit10d73e655cef6e86ea8589dca3df4e495e4900b0 (patch)
treea6bb3c7f80ec2a4b7eb65e714d6fe4426be26fcc /mm
parentc060f943d0929f3e429c5d9522290584f6281d6e (diff)
mm: bootmem: fix free_all_bootmem_core() with odd bitmap alignment
Currently free_all_bootmem_core ignores that node_min_pfn may be not multiple of BITS_PER_LONG. Eg commit 6dccdcbe2c3e ("mm: bootmem: fix checking the bitmap when finally freeing bootmem") shifts vec by lower bits of start instead of lower bits of idx. Also if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) assumes that vec bit 0 corresponds to start pfn, which is only true when node_min_pfn is a multiple of BITS_PER_LONG. Also loop in the else clause can double-free pages (e.g. with node_min_pfn == start == 1, map[0] == ~0 on 32-bit machine page 32 will be double-freed). This bug causes the following message during xtensa kernel boot: bootmem::free_all_bootmem_core nid=0 start=1 end=8000 BUG: Bad page state in process swapper pfn:00001 page:d04bd020 count:0 mapcount:-127 mapping: (null) index:0x2 page flags: 0x0() Call Trace: bad_page+0x8c/0x9c free_pages_prepare+0x5e/0x88 free_hot_cold_page+0xc/0xa0 __free_pages+0x24/0x38 __free_pages_bootmem+0x54/0x56 free_all_bootmem_core$part$11+0xeb/0x138 free_all_bootmem+0x46/0x58 mem_init+0x25/0xa4 start_kernel+0x11e/0x25c should_never_return+0x0/0x3be7 The fix is the following: - always align vec so that its bit 0 corresponds to start - provide BITS_PER_LONG bits in vec, if those bits are available in the map - don't free pages past next start position in the else clause. Signed-off-by: Max Filippov <jcmvbkbc@gmail.com> Cc: Gavin Shan <shangw@linux.vnet.ibm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Tejun Heo <tj@kernel.org> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Joonsoo Kim <js1304@gmail.com> Cc: Prasad Koya <prasad.koya@gmail.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/bootmem.c24
1 file changed, 18 insertions, 6 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1324cd74faec..b93376c39b61 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
185 185
186 while (start < end) { 186 while (start < end) {
187 unsigned long *map, idx, vec; 187 unsigned long *map, idx, vec;
188 unsigned shift;
188 189
189 map = bdata->node_bootmem_map; 190 map = bdata->node_bootmem_map;
190 idx = start - bdata->node_min_pfn; 191 idx = start - bdata->node_min_pfn;
192 shift = idx & (BITS_PER_LONG - 1);
193 /*
194 * vec holds at most BITS_PER_LONG map bits,
195 * bit 0 corresponds to start.
196 */
191 vec = ~map[idx / BITS_PER_LONG]; 197 vec = ~map[idx / BITS_PER_LONG];
198
199 if (shift) {
200 vec >>= shift;
201 if (end - start >= BITS_PER_LONG)
202 vec |= ~map[idx / BITS_PER_LONG + 1] <<
203 (BITS_PER_LONG - shift);
204 }
192 /* 205 /*
193 * If we have a properly aligned and fully unreserved 206 * If we have a properly aligned and fully unreserved
194 * BITS_PER_LONG block of pages in front of us, free 207 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
201 count += BITS_PER_LONG; 214 count += BITS_PER_LONG;
202 start += BITS_PER_LONG; 215 start += BITS_PER_LONG;
203 } else { 216 } else {
204 unsigned long off = 0; 217 unsigned long cur = start;
205 218
206 vec >>= start & (BITS_PER_LONG - 1); 219 start = ALIGN(start + 1, BITS_PER_LONG);
207 while (vec) { 220 while (vec && cur != start) {
208 if (vec & 1) { 221 if (vec & 1) {
209 page = pfn_to_page(start + off); 222 page = pfn_to_page(cur);
210 __free_pages_bootmem(page, 0); 223 __free_pages_bootmem(page, 0);
211 count++; 224 count++;
212 } 225 }
213 vec >>= 1; 226 vec >>= 1;
214 off++; 227 ++cur;
215 } 228 }
216 start = ALIGN(start + 1, BITS_PER_LONG);
217 } 229 }
218 } 230 }
219 231