-rw-r--r--  mm/bootmem.c  83
1 file changed, 38 insertions(+), 45 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 9da7d4097810..300d126ec533 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -144,66 +144,59 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-	struct page *page;
-	unsigned long pfn;
-	unsigned long i, count;
-	unsigned long idx, pages;
-	unsigned long *map;
-	int gofast = 0;
-
-	BUG_ON(!bdata->node_bootmem_map);
-
-	count = 0;
-	/* first extant page of the node */
-	pfn = PFN_DOWN(bdata->node_boot_start);
-	idx = bdata->node_low_pfn - pfn;
-	map = bdata->node_bootmem_map;
-	/*
-	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
-	 * be able to free page orders of that size at once.
-	 */
-	if (!(pfn & (BITS_PER_LONG-1)))
-		gofast = 1;
-
-	for (i = 0; i < idx; ) {
-		unsigned long v = ~map[i / BITS_PER_LONG];
-
-		if (gofast && v == ~0UL) {
-			int order;
-
-			page = pfn_to_page(pfn);
-			count += BITS_PER_LONG;
-			order = ffs(BITS_PER_LONG) - 1;
-			__free_pages_bootmem(page, order);
-			i += BITS_PER_LONG;
-			page += BITS_PER_LONG;
-		} else if (v) {
-			unsigned long m;
-
-			page = pfn_to_page(pfn);
-			for (m = 1; m && i < idx; m<<=1, page++, i++) {
-				if (v & m) {
-					count++;
-					__free_pages_bootmem(page, 0);
-				}
-			}
-		} else {
-			i += BITS_PER_LONG;
-		}
-		pfn += BITS_PER_LONG;
-	}
-
-	/*
-	 * Now free the allocator bitmap itself, it's not
-	 * needed anymore:
-	 */
-	page = virt_to_page(bdata->node_bootmem_map);
-	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
-	idx = bootmem_bootmap_pages(pages);
-	for (i = 0; i < idx; i++, page++)
-		__free_pages_bootmem(page, 0);
-	count += i;
-	bdata->node_bootmem_map = NULL;
+	int aligned;
+	struct page *page;
+	unsigned long start, end, pages, count = 0;
+
+	if (!bdata->node_bootmem_map)
+		return 0;
+
+	start = PFN_DOWN(bdata->node_boot_start);
+	end = bdata->node_low_pfn;
+
+	/*
+	 * If the start is aligned to the machines wordsize, we might
+	 * be able to free pages in bulks of that order.
+	 */
+	aligned = !(start & (BITS_PER_LONG - 1));
+
+	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
+		bdata - bootmem_node_data, start, end, aligned);
+
+	while (start < end) {
+		unsigned long *map, idx, vec;
+
+		map = bdata->node_bootmem_map;
+		idx = start - PFN_DOWN(bdata->node_boot_start);
+		vec = ~map[idx / BITS_PER_LONG];
+
+		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+			int order = ilog2(BITS_PER_LONG);
+
+			__free_pages_bootmem(pfn_to_page(start), order);
+			count += BITS_PER_LONG;
+		} else {
+			unsigned long off = 0;
+
+			while (vec && off < BITS_PER_LONG) {
+				if (vec & 1) {
+					page = pfn_to_page(start + off);
+					__free_pages_bootmem(page, 0);
+					count++;
+				}
+				vec >>= 1;
+				off++;
+			}
+		}
+		start += BITS_PER_LONG;
+	}
+
+	page = virt_to_page(bdata->node_bootmem_map);
+	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+	pages = bootmem_bootmap_pages(pages);
+	count += pages;
+	while (pages--)
+		__free_pages_bootmem(page++, 0);
 
 	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
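For reference, a minimal standalone sketch of the scanning strategy used by the rewritten loop: walk the bootmem bitmap one BITS_PER_LONG-bit word per iteration, release a whole naturally aligned word's worth of pages in one call when every page in it is free, and fall back to a per-bit walk otherwise. This is not kernel code; release_range() and release_one() are hypothetical stand-ins for __free_pages_bootmem(), and the bitmap contents in main() are made up for illustration.

/* Standalone illustration only, not the kernel implementation. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical stand-ins for __free_pages_bootmem(). */
static void release_range(unsigned long pfn, unsigned long n)
{
	printf("bulk  free: pfn %lu..%lu\n", pfn, pfn + n - 1);
}

static void release_one(unsigned long pfn)
{
	printf("single free: pfn %lu\n", pfn);
}

/*
 * map has one bit per page, a set bit meaning "still reserved".
 * The pages covered are [base, base + pages).
 */
static unsigned long free_all(const unsigned long *map, unsigned long base,
			      unsigned long pages)
{
	unsigned long pfn = base, end = base + pages, count = 0;
	int aligned = !(base & (BITS_PER_LONG - 1));

	while (pfn < end) {
		/* invert the word so that set bits now mean "free this page" */
		unsigned long idx = pfn - base;
		unsigned long vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && pfn + BITS_PER_LONG < end) {
			/* whole word free and safely inside the range: one bulk call */
			release_range(pfn, BITS_PER_LONG);
			count += BITS_PER_LONG;
		} else {
			/* mixed word: walk it bit by bit */
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					release_one(pfn + off);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		pfn += BITS_PER_LONG;
	}
	return count;
}

int main(void)
{
	/* two bitmap words: first entirely free, second with one page reserved */
	unsigned long map[2] = { 0UL, 0UL };

	map[1] |= 1UL << 3;	/* pfn base + BITS_PER_LONG + 3 stays reserved */
	printf("released %lu pages\n", free_all(map, 0, 2 * BITS_PER_LONG));
	return 0;
}

Running the sketch, the first word is handed back with a single bulk call, while the second word, which still has a reserved bit, falls back to per-page frees, mirroring the aligned/else split in the new while loop.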