about | summary | refs | log | tree | commit | diff | stats
path: root/mm
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2012-01-10 18:08:15 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-10 19:30:45 -0500
commit799f933a82d878d7f15215473c5561ce984ada75 (patch)
treea096ac8de8da31385026c3d51eec292815438ac4 /mm
parent560a036b3a3733e33424385c0a0c799dee454d05 (diff)
mm: bootmem: try harder to free pages in bulk
The loop that frees pages to the page allocator while bootstrapping tries to free higher-order blocks only when the starting address is aligned to that block size. Otherwise it will free all pages on that node one-by-one. Change it to free individual pages up to the first aligned block and then try higher-order frees from there.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/bootmem.c22
1 file changed, 10 insertions, 12 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1aea171539ac..668e94df8cf2 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
171 171
172static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 172static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
173{ 173{
174 int aligned;
175 struct page *page; 174 struct page *page;
176 unsigned long start, end, pages, count = 0; 175 unsigned long start, end, pages, count = 0;
177 176
@@ -181,14 +180,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
181 start = bdata->node_min_pfn; 180 start = bdata->node_min_pfn;
182 end = bdata->node_low_pfn; 181 end = bdata->node_low_pfn;
183 182
184 /* 183 bdebug("nid=%td start=%lx end=%lx\n",
185 * If the start is aligned to the machines wordsize, we might 184 bdata - bootmem_node_data, start, end);
186 * be able to free pages in bulks of that order.
187 */
188 aligned = !(start & (BITS_PER_LONG - 1));
189
190 bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
191 bdata - bootmem_node_data, start, end, aligned);
192 185
193 while (start < end) { 186 while (start < end) {
194 unsigned long *map, idx, vec; 187 unsigned long *map, idx, vec;
@@ -196,12 +189,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
196 map = bdata->node_bootmem_map; 189 map = bdata->node_bootmem_map;
197 idx = start - bdata->node_min_pfn; 190 idx = start - bdata->node_min_pfn;
198 vec = ~map[idx / BITS_PER_LONG]; 191 vec = ~map[idx / BITS_PER_LONG];
199 192 /*
200 if (aligned && vec == ~0UL) { 193 * If we have a properly aligned and fully unreserved
194 * BITS_PER_LONG block of pages in front of us, free
195 * it in one go.
196 */
197 if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
201 int order = ilog2(BITS_PER_LONG); 198 int order = ilog2(BITS_PER_LONG);
202 199
203 __free_pages_bootmem(pfn_to_page(start), order); 200 __free_pages_bootmem(pfn_to_page(start), order);
204 count += BITS_PER_LONG; 201 count += BITS_PER_LONG;
202 start += BITS_PER_LONG;
205 } else { 203 } else {
206 unsigned long off = 0; 204 unsigned long off = 0;
207 205
@@ -214,8 +212,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
214 vec >>= 1; 212 vec >>= 1;
215 off++; 213 off++;
216 } 214 }
215 start = ALIGN(start + 1, BITS_PER_LONG);
217 } 216 }
218 start += BITS_PER_LONG;
219 } 217 }
220 218
221 page = virt_to_page(bdata->node_bootmem_map); 219 page = virt_to_page(bdata->node_bootmem_map);