aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorAndi Kleen <ak@suse.de>2008-01-30 07:33:17 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:33:17 -0500
commit751752789162fde69474edfa15935d0a77c0bc17 (patch)
tree43eef77784989bc25979da1cc128e31fc46b3cea /arch/x86
parentedcd81199dbad5db11ae91b507cec1d46dd94a49 (diff)
x86: replace hard coded reservations in 64-bit early boot code with dynamic table
On x86-64 there are several memory allocations before bootmem. To avoid them stomping on each other they used to be all hard coded in bad_addr(). Replace this with an array that is filled as needed. This cleans up the code considerably and allows its use to be expanded. Cc: peterz@infradead.org Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/e820_64.c95
-rw-r--r--arch/x86/kernel/head64.c48
-rw-r--r--arch/x86/kernel/setup_64.c67
-rw-r--r--arch/x86/mm/init_64.c5
-rw-r--r--arch/x86/mm/numa_64.c1
5 files changed, 108 insertions(+), 108 deletions(-)
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 07cfaae7ab07..f8b7bebb4344 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -47,56 +47,65 @@ unsigned long end_pfn_map;
47 */ 47 */
48static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT; 48static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
49 49
50/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 50/*
51static inline int bad_addr(unsigned long *addrp, unsigned long size) 51 * Early reserved memory areas.
52{ 52 */
53 unsigned long addr = *addrp, last = addr + size; 53#define MAX_EARLY_RES 20
54
55struct early_res {
56 unsigned long start, end;
57};
58static struct early_res early_res[MAX_EARLY_RES] __initdata = {
59 { 0, PAGE_SIZE }, /* BIOS data page */
60#ifdef CONFIG_SMP
61 { SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE },
62#endif
63 {}
64};
54 65
55 /* various gunk below that needed for SMP startup */ 66void __init reserve_early(unsigned long start, unsigned long end)
56 if (addr < 0x8000) { 67{
57 *addrp = PAGE_ALIGN(0x8000); 68 int i;
58 return 1; 69 struct early_res *r;
70 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
71 r = &early_res[i];
72 if (end > r->start && start < r->end)
73 panic("Duplicated early reservation %lx-%lx\n",
74 start, end);
59 } 75 }
76 if (i >= MAX_EARLY_RES)
77 panic("Too many early reservations");
78 r = &early_res[i];
79 r->start = start;
80 r->end = end;
81}
60 82
61 /* direct mapping tables of the kernel */ 83void __init early_res_to_bootmem(void)
62 if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 84{
63 *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT); 85 int i;
64 return 1; 86 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
87 struct early_res *r = &early_res[i];
88 reserve_bootmem_generic(r->start, r->end - r->start);
65 } 89 }
90}
66 91
67 /* initrd */ 92/* Check for already reserved areas */
68#ifdef CONFIG_BLK_DEV_INITRD 93static inline int bad_addr(unsigned long *addrp, unsigned long size)
69 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { 94{
70 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; 95 int i;
71 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; 96 unsigned long addr = *addrp, last;
72 unsigned long ramdisk_end = ramdisk_image+ramdisk_size; 97 int changed = 0;
73 98again:
74 if (last >= ramdisk_image && addr < ramdisk_end) { 99 last = addr + size;
75 *addrp = PAGE_ALIGN(ramdisk_end); 100 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
76 return 1; 101 struct early_res *r = &early_res[i];
102 if (last >= r->start && addr < r->end) {
103 *addrp = addr = r->end;
104 changed = 1;
105 goto again;
77 } 106 }
78 } 107 }
79#endif 108 return changed;
80 /* kernel code */
81 if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
82 *addrp = PAGE_ALIGN(__pa_symbol(&_end));
83 return 1;
84 }
85
86 if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
87 *addrp = PAGE_ALIGN(ebda_addr + ebda_size);
88 return 1;
89 }
90
91#ifdef CONFIG_NUMA
92 /* NUMA memory to node map */
93 if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
94 *addrp = nodemap_addr + nodemap_size;
95 return 1;
96 }
97#endif
98 /* XXX ramdisk image here? */
99 return 0;
100} 109}
101 110
102/* 111/*
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 87e031d4abf1..58438bafedca 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -21,6 +21,7 @@
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22#include <asm/sections.h> 22#include <asm/sections.h>
23#include <asm/kdebug.h> 23#include <asm/kdebug.h>
24#include <asm/e820.h>
24 25
25static void __init zap_identity_mappings(void) 26static void __init zap_identity_mappings(void)
26{ 27{
@@ -48,6 +49,35 @@ static void __init copy_bootdata(char *real_mode_data)
48 } 49 }
49} 50}
50 51
52#define EBDA_ADDR_POINTER 0x40E
53
54static __init void reserve_ebda(void)
55{
56 unsigned ebda_addr, ebda_size;
57
58 /*
59 * there is a real-mode segmented pointer pointing to the
60 * 4K EBDA area at 0x40E
61 */
62 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
63 ebda_addr <<= 4;
64
65 if (!ebda_addr)
66 return;
67
68 ebda_size = *(unsigned short *)__va(ebda_addr);
69
70 /* Round EBDA up to pages */
71 if (ebda_size == 0)
72 ebda_size = 1;
73 ebda_size <<= 10;
74 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
75 if (ebda_size > 64*1024)
76 ebda_size = 64*1024;
77
78 reserve_early(ebda_addr, ebda_addr + ebda_size);
79}
80
51void __init x86_64_start_kernel(char * real_mode_data) 81void __init x86_64_start_kernel(char * real_mode_data)
52{ 82{
53 int i; 83 int i;
@@ -75,5 +105,23 @@ void __init x86_64_start_kernel(char * real_mode_data)
75 pda_init(0); 105 pda_init(0);
76 copy_bootdata(__va(real_mode_data)); 106 copy_bootdata(__va(real_mode_data));
77 107
108 reserve_early(__pa_symbol(&_text), __pa_symbol(&_end));
109
110 /* Reserve INITRD */
111 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
112 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
113 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
114 unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
115 reserve_early(ramdisk_image, ramdisk_end);
116 }
117
118 reserve_ebda();
119
120 /*
121 * At this point everything still needed from the boot loader
122 * or BIOS or kernel text should be early reserved or marked not
123 * RAM in e820. All other memory is free game.
124 */
125
78 start_kernel(); 126 start_kernel();
79} 127}
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 4a3f00b49236..6cbd15625dce 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -245,41 +245,6 @@ static inline void __init reserve_crashkernel(void)
245{} 245{}
246#endif 246#endif
247 247
248#define EBDA_ADDR_POINTER 0x40E
249
250unsigned __initdata ebda_addr;
251unsigned __initdata ebda_size;
252
253static void __init discover_ebda(void)
254{
255 /*
256 * there is a real-mode segmented pointer pointing to the
257 * 4K EBDA area at 0x40E
258 */
259 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
260 /*
261 * There can be some situations, like paravirtualized guests,
262 * in which there is no available ebda information. In such
263 * case, just skip it
264 */
265 if (!ebda_addr) {
266 ebda_size = 0;
267 return;
268 }
269
270 ebda_addr <<= 4;
271
272 ebda_size = *(unsigned short *)__va(ebda_addr);
273
274 /* Round EBDA up to pages */
275 if (ebda_size == 0)
276 ebda_size = 1;
277 ebda_size <<= 10;
278 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
279 if (ebda_size > 64*1024)
280 ebda_size = 64*1024;
281}
282
283/* Overridden in paravirt.c if CONFIG_PARAVIRT */ 248/* Overridden in paravirt.c if CONFIG_PARAVIRT */
284void __attribute__((weak)) __init memory_setup(void) 249void __attribute__((weak)) __init memory_setup(void)
285{ 250{
@@ -349,8 +314,6 @@ void __init setup_arch(char **cmdline_p)
349 314
350 check_efer(); 315 check_efer();
351 316
352 discover_ebda();
353
354 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); 317 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
355 if (efi_enabled) 318 if (efi_enabled)
356 efi_init(); 319 efi_init();
@@ -397,33 +360,7 @@ void __init setup_arch(char **cmdline_p)
397 contig_initmem_init(0, end_pfn); 360 contig_initmem_init(0, end_pfn);
398#endif 361#endif
399 362
400 /* Reserve direct mapping */ 363 early_res_to_bootmem();
401 reserve_bootmem_generic(table_start << PAGE_SHIFT,
402 (table_end - table_start) << PAGE_SHIFT);
403
404 /* reserve kernel */
405 reserve_bootmem_generic(__pa_symbol(&_text),
406 __pa_symbol(&_end) - __pa_symbol(&_text));
407
408 /*
409 * reserve physical page 0 - it's a special BIOS page on many boxes,
410 * enabling clean reboots, SMP operation, laptop functions.
411 */
412 reserve_bootmem_generic(0, PAGE_SIZE);
413
414 /* reserve ebda region */
415 if (ebda_addr)
416 reserve_bootmem_generic(ebda_addr, ebda_size);
417#ifdef CONFIG_NUMA
418 /* reserve nodemap region */
419 if (nodemap_addr)
420 reserve_bootmem_generic(nodemap_addr, nodemap_size);
421#endif
422
423#ifdef CONFIG_SMP
424 /* Reserve SMP trampoline */
425 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
426#endif
427 364
428#ifdef CONFIG_ACPI_SLEEP 365#ifdef CONFIG_ACPI_SLEEP
429 /* 366 /*
@@ -453,6 +390,8 @@ void __init setup_arch(char **cmdline_p)
453 initrd_start = ramdisk_image + PAGE_OFFSET; 390 initrd_start = ramdisk_image + PAGE_OFFSET;
454 initrd_end = initrd_start+ramdisk_size; 391 initrd_end = initrd_start+ramdisk_size;
455 } else { 392 } else {
393 /* Assumes everything on node 0 */
394 free_bootmem(ramdisk_image, ramdisk_size);
456 printk(KERN_ERR "initrd extends beyond end of memory " 395 printk(KERN_ERR "initrd extends beyond end of memory "
457 "(0x%08lx > 0x%08lx)\ndisabling initrd\n", 396 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
458 ramdisk_end, end_of_mem); 397 ramdisk_end, end_of_mem);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05f12c527b02..8198840c3dcb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -176,7 +176,8 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
176 set_pte_phys(address, phys, prot); 176 set_pte_phys(address, phys, prot);
177} 177}
178 178
179unsigned long __meminitdata table_start, table_end; 179static unsigned long __initdata table_start;
180static unsigned long __meminitdata table_end;
180 181
181static __meminit void *alloc_low_page(unsigned long *phys) 182static __meminit void *alloc_low_page(unsigned long *phys)
182{ 183{
@@ -387,6 +388,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
387 if (!after_bootmem) 388 if (!after_bootmem)
388 mmu_cr4_features = read_cr4(); 389 mmu_cr4_features = read_cr4();
389 __flush_tlb_all(); 390 __flush_tlb_all();
391
392 reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
390} 393}
391 394
392#ifndef CONFIG_NUMA 395#ifndef CONFIG_NUMA
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 37d429beba96..5d24dc1ec237 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -102,6 +102,7 @@ static int __init allocate_cachealigned_memnodemap(void)
102 } 102 }
103 pad_addr = (nodemap_addr + pad) & ~pad; 103 pad_addr = (nodemap_addr + pad) & ~pad;
104 memnodemap = phys_to_virt(pad_addr); 104 memnodemap = phys_to_virt(pad_addr);
105 reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
105 106
106 printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", 107 printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
107 nodemap_addr, nodemap_addr + nodemap_size); 108 nodemap_addr, nodemap_addr + nodemap_size);