author    Haavard Skinnemoen <hskinnemoen@atmel.com>  2007-03-21 11:02:57 -0400
committer Haavard Skinnemoen <hskinnemoen@atmel.com>  2007-04-27 07:44:14 -0400
commit    d8011768e6bdd0d9de5cc7bdbd3077b4b4fab8c7
tree      0d03566388d439edf87432424d4377dbfcbe053f
parent    5539f59ac40473730806580f212c4eac6e769f01
[AVR32] Simplify early handling of memory regions
Use struct resource to specify both physical memory regions and
reserved regions and push everything into the same framework,
including kernel code/data and initrd memory. This allows us to get
rid of many special cases in the bootmem initialization and will also
make it easier to implement more robust handling of framebuffer
memory later.
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/avr32/kernel/setup.c  |  495
1 file changed, 232 insertions, 263 deletions
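
The core data-structure change is easiest to see in isolation: the patch
threads struct resource objects into singly linked lists, kept sorted by
address, via their sibling pointer. Below is a minimal user-space sketch
(not from the patch; the struct and all names are simplified stand-ins)
of the pointer-to-pointer insertion scan that add_physical_memory() and
add_reserved_region() in the diff perform:

/*
 * Sketch only: sorted insertion into a sibling-linked list using a
 * pointer-to-pointer, so the head needs no special case.
 */
#include <stdio.h>

struct region {
        unsigned long start, end;       /* inclusive bounds */
        struct region *sibling;         /* next region, sorted by start */
};

static struct region *list;

/* Insert r in address order; reject any overlap with an existing node. */
static int insert_sorted(struct region *r)
{
        struct region *next, **pprev;

        for (pprev = &list, next = list; next;
             pprev = &next->sibling, next = next->sibling) {
                if (r->end < next->start)
                        break;          /* r fits before this node */
                if (r->start <= next->end)
                        return -1;      /* overlap */
        }
        r->sibling = next;
        *pprev = r;
        return 0;
}

int main(void)
{
        struct region a = { 0x1000, 0x1fff, NULL };
        struct region b = { 0x0000, 0x0fff, NULL };
        struct region *r;

        insert_sorted(&a);
        insert_sorted(&b);      /* lands before a, via *pprev */
        for (r = list; r; r = r->sibling)
                printf("%08lx-%08lx\n", r->start, r->end);
        return 0;
}

One difference worth noting: the sketch links the new node to its
successor explicitly, whereas the patch leaves new->sibling to the
zero-initialized res_cache entry, presumably because the bootloader
supplies regions in ascending order.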
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 53a1ff0cb05c..d0a35a1b6a66 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -32,13 +32,6 @@
 extern int root_mountflags;
 
 /*
- * Bootloader-provided information about physical memory
- */
-struct tag_mem_range *mem_phys;
-struct tag_mem_range *mem_reserved;
-struct tag_mem_range *mem_ramdisk;
-
-/*
  * Initialize loops_per_jiffy as 5000000 (500MIPS).
  * Better make it too large than too small...
  */
@@ -50,32 +43,153 @@ EXPORT_SYMBOL(boot_cpu_data);
 static char __initdata command_line[COMMAND_LINE_SIZE];
 
 /*
- * Should be more than enough, but if you have a _really_ complex
- * setup, you might need to increase the size of this...
+ * Standard memory resources
  */
-static struct tag_mem_range __initdata mem_range_cache[32];
-static unsigned mem_range_next_free;
+static struct resource __initdata kernel_data = {
+        .name = "Kernel data",
+        .start = 0,
+        .end = 0,
+        .flags = IORESOURCE_MEM,
+};
+static struct resource __initdata kernel_code = {
+        .name = "Kernel code",
+        .start = 0,
+        .end = 0,
+        .flags = IORESOURCE_MEM,
+        .sibling = &kernel_data,
+};
 
 /*
- * Standard memory resources
+ * Available system RAM and reserved regions as singly linked
+ * lists. These lists are traversed using the sibling pointer in
+ * struct resource and are kept sorted at all times.
  */
-static struct resource mem_res[] = {
-        {
-                .name = "Kernel code",
-                .start = 0,
-                .end = 0,
-                .flags = IORESOURCE_MEM
-        },
-        {
-                .name = "Kernel data",
-                .start = 0,
-                .end = 0,
-                .flags = IORESOURCE_MEM,
-        },
-};
+static struct resource *__initdata system_ram;
+static struct resource *__initdata reserved = &kernel_code;
+
+/*
+ * We need to allocate these before the bootmem allocator is up and
+ * running, so we need this "cache". 32 entries are probably enough
+ * for all but the most insanely complex systems.
+ */
+static struct resource __initdata res_cache[32];
+static unsigned int __initdata res_cache_next_free;
+
+static void __init resource_init(void)
+{
+        struct resource *mem, *res;
+        struct resource *new;
+
+        kernel_code.start = __pa(init_mm.start_code);
+
+        for (mem = system_ram; mem; mem = mem->sibling) {
+                new = alloc_bootmem_low(sizeof(struct resource));
+                memcpy(new, mem, sizeof(struct resource));
+
+                new->sibling = NULL;
+                if (request_resource(&iomem_resource, new))
+                        printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
+                               mem->start, mem->end);
+        }
+
+        for (res = reserved; res; res = res->sibling) {
+                new = alloc_bootmem_low(sizeof(struct resource));
+                memcpy(new, res, sizeof(struct resource));
+
+                new->sibling = NULL;
+                if (insert_resource(&iomem_resource, new))
+                        printk(KERN_WARNING
+                               "Bad reserved resource %s (%08x-%08x)\n",
+                               res->name, res->start, res->end);
+        }
+}
+
+static void __init
+add_physical_memory(resource_size_t start, resource_size_t end)
+{
+        struct resource *new, *next, **pprev;
+
+        for (pprev = &system_ram, next = system_ram; next;
+             pprev = &next->sibling, next = next->sibling) {
+                if (end < next->start)
+                        break;
+                if (start <= next->end) {
+                        printk(KERN_WARNING
+                               "Warning: Physical memory map is broken\n");
+                        printk(KERN_WARNING
+                               "Warning: %08x-%08x overlaps %08x-%08x\n",
+                               start, end, next->start, next->end);
+                        return;
+                }
+        }
+
+        if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
+                printk(KERN_WARNING
+                       "Warning: Failed to add physical memory %08x-%08x\n",
+                       start, end);
+                return;
+        }
+
+        new = &res_cache[res_cache_next_free++];
+        new->start = start;
+        new->end = end;
+        new->name = "System RAM";
+        new->flags = IORESOURCE_MEM;
+
+        *pprev = new;
+}
+
+static int __init
+add_reserved_region(resource_size_t start, resource_size_t end,
+                    const char *name)
+{
+        struct resource *new, *next, **pprev;
 
-#define kernel_code mem_res[0]
-#define kernel_data mem_res[1]
+        if (end < start)
+                return -EINVAL;
+
+        if (res_cache_next_free >= ARRAY_SIZE(res_cache))
+                return -ENOMEM;
+
+        for (pprev = &reserved, next = reserved; next;
+             pprev = &next->sibling, next = next->sibling) {
+                if (end < next->start)
+                        break;
+                if (start <= next->end)
+                        return -EBUSY;
+        }
+
+        new = &res_cache[res_cache_next_free++];
+        new->start = start;
+        new->end = end;
+        new->name = name;
+        new->flags = IORESOURCE_MEM;
+
+        *pprev = new;
+
+        return 0;
+}
+
+static unsigned long __init
+find_free_region(const struct resource *mem, resource_size_t size,
+                 resource_size_t align)
+{
+        struct resource *res;
+        unsigned long target;
+
+        target = ALIGN(mem->start, align);
+        for (res = reserved; res; res = res->sibling) {
+                if ((target + size) <= res->start)
+                        break;
+                if (target <= res->end)
+                        target = ALIGN(res->end + 1, align);
+        }
+
+        if ((target + size) > (mem->end + 1))
+                return mem->end + 1;
+
+        return target;
+}
 
 /*
  * Early framebuffer allocation. Works as follows:
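
find_free_region() above is a first-fit scan: start at the aligned
bottom of the bank, hop past each reserved region that blocks the
candidate, and signal failure by returning mem->end + 1 when the bank
is exhausted. A small stand-alone illustration (hypothetical addresses
and simplified types; not from the patch):

/* Worked example of the first-fit scan, with a mocked reserved list. */
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

struct region { unsigned long start, end; struct region *sibling; };

/* Reserved list sorted by address: kernel image, then an initrd. */
static struct region initrd = { 0x10400000, 0x105fffff, NULL };
static struct region kernel = { 0x10000000, 0x1003ffff, &initrd };

static unsigned long find_free(unsigned long base, unsigned long limit,
                               unsigned long size, unsigned long align)
{
        struct region *res;
        unsigned long target = ALIGN(base, align);

        for (res = &kernel; res; res = res->sibling) {
                if (target + size <= res->start)
                        break;          /* gap before this region fits */
                if (target <= res->end)
                        target = ALIGN(res->end + 1, align);
        }
        return (target + size > limit + 1) ? limit + 1 : target;
}

int main(void)
{
        /* A 16KB allocation must skip the kernel image: 0x10040000. */
        printf("%08lx\n", find_free(0x10000000, 0x10ffffff, 0x4000, 0x1000));
        return 0;
}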
@@ -112,42 +226,6 @@ static int __init early_parse_fbmem(char *p)
 }
 early_param("fbmem", early_parse_fbmem);
 
-static inline void __init resource_init(void)
-{
-        struct tag_mem_range *region;
-
-        kernel_code.start = __pa(init_mm.start_code);
-        kernel_code.end = __pa(init_mm.end_code - 1);
-        kernel_data.start = __pa(init_mm.end_code);
-        kernel_data.end = __pa(init_mm.brk - 1);
-
-        for (region = mem_phys; region; region = region->next) {
-                struct resource *res;
-                unsigned long phys_start, phys_end;
-
-                if (region->size == 0)
-                        continue;
-
-                phys_start = region->addr;
-                phys_end = phys_start + region->size - 1;
-
-                res = alloc_bootmem_low(sizeof(*res));
-                res->name = "System RAM";
-                res->start = phys_start;
-                res->end = phys_end;
-                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-                request_resource (&iomem_resource, res);
-
-                if (kernel_code.start >= res->start &&
-                    kernel_code.end <= res->end)
-                        request_resource (res, &kernel_code);
-                if (kernel_data.start >= res->start &&
-                    kernel_data.end <= res->end)
-                        request_resource (res, &kernel_data);
-        }
-}
-
 static int __init parse_tag_core(struct tag *tag)
 {
         if (tag->hdr.size > 2) {
@@ -159,11 +237,9 @@ static int __init parse_tag_core(struct tag *tag)
 }
 __tagtable(ATAG_CORE, parse_tag_core);
 
-static int __init parse_tag_mem_range(struct tag *tag,
-                                      struct tag_mem_range **root)
+static int __init parse_tag_mem(struct tag *tag)
 {
-        struct tag_mem_range *cur, **pprev;
-        struct tag_mem_range *new;
+        unsigned long start, end;
 
         /*
          * Ignore zero-sized entries. If we're running standalone, the
@@ -173,34 +249,53 @@ static int __init parse_tag_mem_range(struct tag *tag,
         if (tag->u.mem_range.size == 0)
                 return 0;
 
-        /*
-         * Copy the data so the bootmem init code doesn't need to care
-         * about it.
-         */
-        if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
-                panic("Physical memory map too complex!\n");
+        start = tag->u.mem_range.addr;
+        end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
 
-        new = &mem_range_cache[mem_range_next_free++];
-        *new = tag->u.mem_range;
+        add_physical_memory(start, end);
+        return 0;
+}
+__tagtable(ATAG_MEM, parse_tag_mem);
 
-        pprev = root;
-        cur = *root;
-        while (cur) {
-                pprev = &cur->next;
-                cur = cur->next;
+static int __init parse_tag_rdimg(struct tag *tag)
+{
+#ifdef CONFIG_INITRD
+        struct tag_mem_range *mem = &tag->u.mem_range;
+        int ret;
+
+        if (initrd_start) {
+                printk(KERN_WARNING
+                       "Warning: Only the first initrd image will be used\n");
+                return 0;
         }
 
-        *pprev = new;
-        new->next = NULL;
+        ret = add_reserved_region(mem->start, mem->start + mem->size - 1,
+                                  "initrd");
+        if (ret) {
+                printk(KERN_WARNING
+                       "Warning: Failed to reserve initrd memory\n");
+                return ret;
+        }
+
+        initrd_start = (unsigned long)__va(mem->addr);
+        initrd_end = initrd_start + mem->size;
+#else
+        printk(KERN_WARNING "RAM disk image present, but "
+               "no initrd support in kernel, ignoring\n");
+#endif
 
         return 0;
 }
+__tagtable(ATAG_RDIMG, parse_tag_rdimg);
 
-static int __init parse_tag_mem(struct tag *tag)
+static int __init parse_tag_rsvd_mem(struct tag *tag)
 {
-        return parse_tag_mem_range(tag, &mem_phys);
+        struct tag_mem_range *mem = &tag->u.mem_range;
+
+        return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
+                                   "Reserved");
 }
-__tagtable(ATAG_MEM, parse_tag_mem);
+__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
 
 static int __init parse_tag_cmdline(struct tag *tag)
 {
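
Note the convention change running through the parsers above: the
bootloader tags describe regions as {addr, size} pairs, while struct
resource stores inclusive [start, end] bounds, hence the recurring
addr + size - 1. A quick sanity check of the round-trip (values are
made up for illustration):

/* {addr, size} -> inclusive [start, end], as in parse_tag_mem() above. */
#include <assert.h>

int main(void)
{
        unsigned long addr = 0x10000000, size = 0x01000000;     /* 16MB */
        unsigned long start = addr;
        unsigned long end = addr + size - 1;                    /* 0x10ffffff */

        assert(end - start + 1 == size);        /* size round-trips */
        return 0;
}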
@@ -209,12 +304,6 @@ static int __init parse_tag_cmdline(struct tag *tag)
 }
 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
 
-static int __init parse_tag_rdimg(struct tag *tag)
-{
-        return parse_tag_mem_range(tag, &mem_ramdisk);
-}
-__tagtable(ATAG_RDIMG, parse_tag_rdimg);
-
 static int __init parse_tag_clock(struct tag *tag)
 {
         /*
@@ -225,12 +314,6 @@ static int __init parse_tag_clock(struct tag *tag)
 }
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
-static int __init parse_tag_rsvd_mem(struct tag *tag)
-{
-        return parse_tag_mem_range(tag, &mem_reserved);
-}
-__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
-
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
@@ -262,66 +345,16 @@ static void __init parse_tags(struct tag *t)
                        t->hdr.tag);
 }
 
-static void __init print_memory_map(const char *what,
-                                    struct tag_mem_range *mem)
-{
-        printk ("%s:\n", what);
-        for (; mem; mem = mem->next) {
-                printk (" %08lx - %08lx\n",
-                        (unsigned long)mem->addr,
-                        (unsigned long)(mem->addr + mem->size));
-        }
-}
-
-#define MAX_LOWMEM HIGHMEM_START
-#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
-
-/*
- * Sort a list of memory regions in-place by ascending address.
- *
- * We're using bubble sort because we only have singly linked lists
- * with few elements.
- */
-static void __init sort_mem_list(struct tag_mem_range **pmem)
-{
-        int done;
-        struct tag_mem_range **a, **b;
-
-        if (!*pmem)
-                return;
-
-        do {
-                done = 1;
-                a = pmem, b = &(*pmem)->next;
-                while (*b) {
-                        if ((*a)->addr > (*b)->addr) {
-                                struct tag_mem_range *tmp;
-                                tmp = (*b)->next;
-                                (*b)->next = *a;
-                                *a = *b;
-                                *b = tmp;
-                                done = 0;
-                        }
-                        a = &(*a)->next;
-                        b = &(*a)->next;
-                }
-        } while (!done);
-}
-
 /*
  * Find a free memory region large enough for storing the
  * bootmem bitmap.
  */
 static unsigned long __init
-find_bootmap_pfn(const struct tag_mem_range *mem)
+find_bootmap_pfn(const struct resource *mem)
 {
         unsigned long bootmap_pages, bootmap_len;
-        unsigned long node_pages = PFN_UP(mem->size);
-        unsigned long bootmap_addr = mem->addr;
-        struct tag_mem_range *reserved = mem_reserved;
-        struct tag_mem_range *ramdisk = mem_ramdisk;
-        unsigned long kern_start = __pa(_stext);
-        unsigned long kern_end = __pa(_end);
+        unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
+        unsigned long bootmap_start;
 
         bootmap_pages = bootmem_bootmap_pages(node_pages);
         bootmap_len = bootmap_pages << PAGE_SHIFT;
@@ -331,87 +364,43 @@ find_bootmap_pfn(const struct tag_mem_range *mem)
          * storing the bootmem bitmap. We can take advantage of the
          * fact that all lists have been sorted.
          *
-         * We have to check explicitly reserved regions as well as the
-         * kernel image and any RAMDISK images...
-         *
-         * Oh, and we have to make sure we don't overwrite the taglist
-         * since we're going to use it until the bootmem allocator is
-         * fully up and running.
+         * We have to check that we don't collide with any reserved
+         * regions, which includes the kernel image and any RAMDISK
+         * images.
          */
-        while (1) {
-                if ((bootmap_addr < kern_end) &&
-                    ((bootmap_addr + bootmap_len) > kern_start))
-                        bootmap_addr = kern_end;
-
-                while (reserved &&
-                       (bootmap_addr >= (reserved->addr + reserved->size)))
-                        reserved = reserved->next;
-
-                if (reserved &&
-                    ((bootmap_addr + bootmap_len) >= reserved->addr)) {
-                        bootmap_addr = reserved->addr + reserved->size;
-                        continue;
-                }
-
-                while (ramdisk &&
-                       (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
-                        ramdisk = ramdisk->next;
-
-                if (!ramdisk ||
-                    ((bootmap_addr + bootmap_len) < ramdisk->addr))
-                        break;
-
-                bootmap_addr = ramdisk->addr + ramdisk->size;
-        }
-
-        if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
-                return ~0UL;
+        bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
 
-        return PFN_UP(bootmap_addr);
+        return bootmap_start >> PAGE_SHIFT;
 }
 
+#define MAX_LOWMEM HIGHMEM_START
+#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
+
 static void __init setup_bootmem(void)
 {
         unsigned bootmap_size;
         unsigned long first_pfn, bootmap_pfn, pages;
         unsigned long max_pfn, max_low_pfn;
-        unsigned long kern_start = __pa(_stext);
-        unsigned long kern_end = __pa(_end);
         unsigned node = 0;
-        struct tag_mem_range *bank, *res;
+        struct resource *res;
 
-        sort_mem_list(&mem_phys);
-        sort_mem_list(&mem_reserved);
-
-        print_memory_map("Physical memory", mem_phys);
-        print_memory_map("Reserved memory", mem_reserved);
+        printk(KERN_INFO "Physical memory:\n");
+        for (res = system_ram; res; res = res->sibling)
+                printk(" %08x-%08x\n", res->start, res->end);
+        printk(KERN_INFO "Reserved memory:\n");
+        for (res = reserved; res; res = res->sibling)
+                printk(" %08x-%08x: %s\n",
+                       res->start, res->end, res->name);
 
         nodes_clear(node_online_map);
 
-        if (mem_ramdisk) {
-#ifdef CONFIG_BLK_DEV_INITRD
-                initrd_start = (unsigned long)__va(mem_ramdisk->addr);
-                initrd_end = initrd_start + mem_ramdisk->size;
-
-                print_memory_map("RAMDISK images", mem_ramdisk);
-                if (mem_ramdisk->next)
-                        printk(KERN_WARNING
-                               "Warning: Only the first RAMDISK image "
-                               "will be used\n");
-                sort_mem_list(&mem_ramdisk);
-#else
-                printk(KERN_WARNING "RAM disk image present, but "
-                       "no initrd support in kernel!\n");
-#endif
-        }
-
-        if (mem_phys->next)
+        if (system_ram->sibling)
                 printk(KERN_WARNING "Only using first memory bank\n");
 
-        for (bank = mem_phys; bank; bank = NULL) {
-                first_pfn = PFN_UP(bank->addr);
-                max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
-                bootmap_pfn = find_bootmap_pfn(bank);
+        for (res = system_ram; res; res = NULL) {
+                first_pfn = PFN_UP(res->start);
+                max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
+                bootmap_pfn = find_bootmap_pfn(res);
                 if (bootmap_pfn > max_pfn)
                         panic("No space for bootmem bitmap!\n");
 
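
The loop above converts the bank's inclusive byte bounds into page
frame numbers so that only whole pages are handed to bootmem: PFN_UP()
rounds the start address up, and PFN_DOWN(res->end + 1) rounds the end
down. A sketch of the arithmetic using the stock macro definitions from
include/linux/pfn.h (a PAGE_SHIFT of 12, i.e. 4KB pages, is assumed):

/* PFN rounding as used by setup_bootmem() above. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x10000800;       /* mid-page bank start */
        unsigned long end = 0x10ffffff;         /* inclusive bank end */

        /* Partial pages are excluded: round start up, (end + 1) down. */
        printf("first_pfn = %lx\n", PFN_UP(start));     /* 0x10001 */
        printf("max_pfn   = %lx\n", PFN_DOWN(end + 1)); /* 0x11000 */
        return 0;
}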
@@ -435,10 +424,6 @@ static void __init setup_bootmem(void)
                 bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
                                                  first_pfn, max_low_pfn);
 
-                printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
-                       node, NODE_DATA(node)->bdata,
-                       NODE_DATA(node)->bdata->node_bootmem_map);
-
                 /*
                  * Register fully available RAM pages with the bootmem
                  * allocator.
@@ -447,51 +432,26 @@ static void __init setup_bootmem(void)
                 free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
                                    PFN_PHYS(pages));
 
-                /*
-                 * Reserve space for the kernel image (if present in
-                 * this node)...
-                 */
-                if ((kern_start >= PFN_PHYS(first_pfn)) &&
-                    (kern_start < PFN_PHYS(max_pfn))) {
-                        printk("Node %u: Kernel image %08lx - %08lx\n",
-                               node, kern_start, kern_end);
-                        reserve_bootmem_node(NODE_DATA(node), kern_start,
-                                             kern_end - kern_start);
-                }
-
-                /* ...the bootmem bitmap... */
+                /* Reserve space for the bootmem bitmap... */
                 reserve_bootmem_node(NODE_DATA(node),
                                      PFN_PHYS(bootmap_pfn),
                                      bootmap_size);
 
-                /* ...any RAMDISK images... */
-                for (res = mem_ramdisk; res; res = res->next) {
-                        if (res->addr > PFN_PHYS(max_pfn))
-                                break;
-
-                        if (res->addr >= PFN_PHYS(first_pfn)) {
-                                printk("Node %u: RAMDISK %08lx - %08lx\n",
-                                       node,
-                                       (unsigned long)res->addr,
-                                       (unsigned long)(res->addr + res->size));
-                                reserve_bootmem_node(NODE_DATA(node),
-                                                     res->addr, res->size);
-                        }
-                }
-
                 /* ...and any other reserved regions. */
-                for (res = mem_reserved; res; res = res->next) {
-                        if (res->addr > PFN_PHYS(max_pfn))
+                for (res = reserved; res; res = res->sibling) {
+                        if (res->start > PFN_PHYS(max_pfn))
                                 break;
 
-                        if (res->addr >= PFN_PHYS(first_pfn)) {
-                                printk("Node %u: Reserved %08lx - %08lx\n",
-                                       node,
-                                       (unsigned long)res->addr,
-                                       (unsigned long)(res->addr + res->size));
-                                reserve_bootmem_node(NODE_DATA(node),
-                                                     res->addr, res->size);
-                        }
+                        /*
+                         * resource_init will complain about partial
+                         * overlaps, so we'll just ignore such
+                         * resources for now.
+                         */
+                        if (res->start >= PFN_PHYS(first_pfn)
+                            && res->end < PFN_PHYS(max_pfn))
+                                reserve_bootmem_node(
+                                        NODE_DATA(node), res->start,
                                        res->end - res->start + 1);
                 }
 
                 node_set_online(node);
@@ -502,6 +462,20 @@ void __init setup_arch (char **cmdline_p)
 {
         struct clk *cpu_clk;
 
+        init_mm.start_code = (unsigned long)_text;
+        init_mm.end_code = (unsigned long)_etext;
+        init_mm.end_data = (unsigned long)_edata;
+        init_mm.brk = (unsigned long)_end;
+
+        /*
+         * Include .init section to make allocations easier. It will
+         * be removed before the resource is actually requested.
+         */
+        kernel_code.start = __pa(__init_begin);
+        kernel_code.end = __pa(init_mm.end_code - 1);
+        kernel_data.start = __pa(init_mm.end_code);
+        kernel_data.end = __pa(init_mm.brk - 1);
+
         parse_tags(bootloader_tags);
 
         setup_processor();
@@ -527,11 +501,6 @@ void __init setup_arch (char **cmdline_p)
                        ((cpu_hz + 500) / 1000) % 1000);
         }
 
-        init_mm.start_code = (unsigned long) &_text;
-        init_mm.end_code = (unsigned long) &_etext;
-        init_mm.end_data = (unsigned long) &_edata;
-        init_mm.brk = (unsigned long) &_end;
-
         strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
         *cmdline_p = command_line;
         parse_early_param();