Diffstat (limited to 'mm/bootmem.c')
 -rw-r--r--  mm/bootmem.c | 400
 1 file changed, 400 insertions, 0 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
new file mode 100644
index 000000000000..260e703850d8
--- /dev/null
+++ b/mm/bootmem.c
@@ -0,0 +1,400 @@
/*
 * linux/mm/bootmem.c
 *
 * Copyright (C) 1999 Ingo Molnar
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 * simple boot-time physical memory area allocator and
 * free memory collector. It's used to deal with reserved
 * system memory and memory holes as well.
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "internal.h"

/*
 * Access to this subsystem has to be serialized externally. (this is
 * true for the boot process anyway)
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

EXPORT_SYMBOL(max_pfn);		/* This is exported so
				 * dma_get_required_mask(), which uses
				 * it, can be an inline function */

/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages (unsigned long pages)
{
	unsigned long mapsize;

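	/*
	 * One bit per page: round the bit count up to whole bytes,
	 * then round the byte count up to a whole number of pages.
	 */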
	mapsize = (pages+7)/8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize = ((end - start)+7)/8;

	pgdat->pgdat_next = pgdat_list;
	pgdat_list = pgdat;

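	/* round the bitmap size up to sizeof(long) so it can be scanned a word at a time */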
	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
	bdata->node_boot_start = (start << PAGE_SHIFT);
	bdata->node_low_pfn = end;

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i;
	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long eidx = (addr + size - bdata->node_boot_start +
							PAGE_SIZE-1)/PAGE_SIZE;
	unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(sidx >= eidx);
	BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
		}
}

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i;
	unsigned long start;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */
	unsigned long sidx;
	unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long end = (addr + size)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(end > bdata->node_low_pfn);

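	/* pull the allocation scan hint back so the freed range is not skipped */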
	if (addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up the beginning of the address.
	 */
	start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
	sidx = start - (bdata->node_boot_start/PAGE_SIZE);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx;
	void *ret;

	if(!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
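	/*
	 * 'offset' is the number of pages from node_boot_start up to the
	 * next 'align' boundary, so that aligned page indices translate
	 * into 'align'-aligned physical addresses.
	 */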
	offset = 0;
	if (align &&
	    (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = (align - (bdata->node_boot_start & (align - 1UL)));
	offset >>= PAGE_SHIFT;

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && (goal >= bdata->node_boot_start) &&
	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
	preferred += offset;
	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;
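	/*
	 * incr is the scan step in pages; the GNU "?:" form falls back to
	 * stepping one page at a time when align is smaller than a page.
	 */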

restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit (j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

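	/*
	 * Nothing was found above the preferred start; retry once from the
	 * lowest aligned page of the node before giving up.
	 */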
	if (preferred > offset) {
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = start << PAGE_SHIFT;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = (bdata->last_offset+align-1) & ~(align-1);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE-offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset+size;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
			bdata->last_pos = start+areasize-1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start+areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}

static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
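		/* invert the word so that a set bit now means "page is free" */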
		unsigned long v = ~map[i / BITS_PER_LONG];
		if (gofast && v == ~0UL) {
			int j, order;

			count += BITS_PER_LONG;
			__ClearPageReserved(page);
			order = ffs(BITS_PER_LONG) - 1;
			set_page_refs(page, order);
			for (j = 1; j < BITS_PER_LONG; j++) {
				if (j + 16 < BITS_PER_LONG)
					prefetchw(page + j + 16);
				__ClearPageReserved(page + j);
			}
			__free_pages(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__ClearPageReserved(page);
					set_page_refs(page, 0);
					__free_page(page);
				}
			}
		} else {
			i+=BITS_PER_LONG;
			page += BITS_PER_LONG;
		}
	}
	total += count;

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
		count++;
		__ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}

unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn)
{
	return(init_bootmem_core(pgdat, freepfn, startpfn, endpfn));
}

void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size);
}

void __init free_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node (pg_data_t *pgdat)
{
	return(free_all_bootmem_core(pgdat));
}

unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return(init_bootmem_core(NODE_DATA(0), start, 0, pages));
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
void __init reserve_bootmem (unsigned long addr, unsigned long size)
{
	reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem (unsigned long addr, unsigned long size)
{
	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}

unsigned long __init free_all_bootmem (void)
{
	return(free_all_bootmem_core(NODE_DATA(0)));
}

void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
{
	pg_data_t *pgdat = pgdat_list;
	void *ptr;

	for_each_pgdat(pgdat)
		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
						align, goal)))
			return(ptr);

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
	if (ptr)
		return (ptr);

	return __alloc_bootmem(size, align, goal);
}