Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/e820.c               | 214
-rw-r--r--  arch/x86/kernel/e820_64.c            | 196
-rw-r--r--  arch/x86/kernel/head32.c             |  76
-rw-r--r--  arch/x86/kernel/setup_32.c           | 109
-rw-r--r--  arch/x86/kernel/smpboot.c            |  17
-rw-r--r--  arch/x86/kernel/trampoline.c         |   2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  |   9
7 files changed, 314 insertions(+), 309 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 41c480ae47df..35da8cdbe5e6 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -22,7 +22,9 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/e820.h>
+#include <asm/proto.h>
 #include <asm/setup.h>
+#include <asm/trampoline.h>
 
 struct e820map e820;
 
@@ -493,3 +495,215 @@ __init void e820_setup_gap(void)
 		pci_mem_start, gapstart, gapsize);
 }
 
+
+/*
+ * Early reserved memory areas.
+ */
+#define MAX_EARLY_RES 20
+
+struct early_res {
+	u64 start, end;
+	char name[16];
+};
+static struct early_res early_res[MAX_EARLY_RES] __initdata = {
+	{ 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE)
+	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
+#endif
+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
+	/*
+	 * But first pinch a few for the stack/trampoline stuff
+	 * FIXME: Don't need the extra page at 4K, but need to fix
+	 * trampoline before removing it. (see the GDT stuff)
+	 */
+	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" },
+	/*
+	 * Has to be in very low memory so we can execute
+	 * real-mode AP code.
+	 */
+	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" },
+#endif
+	{}
+};
+
+void __init reserve_early(u64 start, u64 end, char *name)
+{
+	int i;
+	struct early_res *r;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		r = &early_res[i];
+		if (end > r->start && start < r->end)
+			panic("Overlapping early reservations %llx-%llx %s to %llx-%llx %s\n",
+			      start, end - 1, name?name:"", r->start,
+			      r->end - 1, r->name);
+	}
+	if (i >= MAX_EARLY_RES)
+		panic("Too many early reservations");
+	r = &early_res[i];
+	r->start = start;
+	r->end = end;
+	if (name)
+		strncpy(r->name, name, sizeof(r->name) - 1);
+}
+
+void __init free_early(u64 start, u64 end)
+{
+	struct early_res *r;
+	int i, j;
+
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		r = &early_res[i];
+		if (start == r->start && end == r->end)
+			break;
+	}
+	if (i >= MAX_EARLY_RES || !early_res[i].end)
+		panic("free_early on not reserved area: %llx-%llx!",
+		      start, end);
+
+	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
+		;
+
+	memmove(&early_res[i], &early_res[i + 1],
+		(j - 1 - i) * sizeof(struct early_res));
+
+	early_res[j - 1].end = 0;
+}
+
+void __init early_res_to_bootmem(u64 start, u64 end)
+{
+	int i;
+	u64 final_start, final_end;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		final_start = max(start, r->start);
+		final_end = min(end, r->end);
+		if (final_start >= final_end)
+			continue;
+		printk(KERN_INFO "  early res: %d [%llx-%llx] %s\n", i,
+			final_start, final_end - 1, r->name);
+#ifdef CONFIG_X86_64
+		reserve_bootmem_generic(final_start, final_end - final_start);
+#else
+		reserve_bootmem(final_start, final_end - final_start,
+				BOOTMEM_DEFAULT);
+#endif
+	}
+}
+
+/* Check for already reserved areas */
+static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+	int i;
+	u64 addr = *addrp, last;
+	int changed = 0;
+again:
+	last = addr + size;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		if (last >= r->start && addr < r->end) {
+			*addrp = addr = round_up(r->end, align);
+			changed = 1;
+			goto again;
+		}
+	}
+	return changed;
+}
+
+/* Check for already reserved areas */
+static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+{
+	int i;
+	u64 addr = *addrp, last;
+	u64 size = *sizep;
+	int changed = 0;
+again:
+	last = addr + size;
+	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+		struct early_res *r = &early_res[i];
+		if (last > r->start && addr < r->start) {
+			size = r->start - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last > r->end && addr < r->end) {
+			addr = round_up(r->end, align);
+			size = last - addr;
+			changed = 1;
+			goto again;
+		}
+		if (last <= r->end && addr >= r->start) {
+			(*sizep)++;
+			return 0;
+		}
+	}
+	if (changed) {
+		*addrp = addr;
+		*sizep = size;
+	}
+	return changed;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 addr, last;
+		u64 ei_last;
+
+		if (ei->type != E820_RAM)
+			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
+		if (addr < start)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
+			continue;
+		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
+			;
+		last = addr + size;
+		if (last > ei_last)
+			continue;
+		if (last > end)
+			continue;
+		return addr;
+	}
+	return -1ULL;
+}
+
+/*
+ * Find next free range after *start
+ */
+u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 addr, last;
+		u64 ei_last;
+
+		if (ei->type != E820_RAM)
+			continue;
+		addr = round_up(ei->addr, align);
+		ei_last = ei->addr + ei->size;
+		if (addr < start)
+			addr = round_up(start, align);
+		if (addr >= ei_last)
+			continue;
+		*sizep = ei_last - addr;
+		while (bad_addr_size(&addr, sizep, align) &&
+			addr + *sizep <= ei_last)
+			;
+		last = addr + *sizep;
+		if (last > ei_last)
+			continue;
+		return addr;
+	}
+	return -1UL;
+
+}
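The heart of the new API is the half-open interval test in reserve_early(): two ranges [start, end) collide exactly when end > r->start && start < r->end, so ranges that merely touch end-to-start are accepted. Below is a minimal user-space sketch of that fixed-table scheme; it is not kernel code, and the table seed, sample addresses, and exit-on-error handling stand in for the kernel's panic():

/* Minimal user-space sketch of the early_res table logic (illustrative). */
#include <stdio.h>
#include <stdlib.h>

#define MAX_EARLY_RES 20

struct early_res {
	unsigned long long start, end;	/* half-open: [start, end) */
	char name[16];
};

static struct early_res early_res[MAX_EARLY_RES] = {
	{ 0, 0x1000, "BIOS data page" },	/* seed entry, as in the patch */
};

static void reserve_early(unsigned long long start, unsigned long long end,
			  const char *name)
{
	struct early_res *r;
	int i;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];
		if (end > r->start && start < r->end) {	/* overlap test */
			fprintf(stderr, "overlapping reservations: %llx-%llx %s vs %llx-%llx %s\n",
				start, end - 1, name, r->start, r->end - 1, r->name);
			exit(EXIT_FAILURE);	/* stands in for panic() */
		}
	}
	if (i >= MAX_EARLY_RES) {
		fprintf(stderr, "too many early reservations\n");
		exit(EXIT_FAILURE);
	}
	r = &early_res[i];
	r->start = start;
	r->end = end;
	snprintf(r->name, sizeof(r->name), "%s", name);
}

int main(void)
{
	reserve_early(0x100000, 0x400000, "TEXT DATA BSS");
	reserve_early(0x400000, 0x800000, "RAMDISK");	/* touching: allowed */
	for (int i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
		printf("early res: %d [%llx-%llx] %s\n", i,
		       early_res[i].start, early_res[i].end - 1, early_res[i].name);
	return 0;
}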
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 354fbb221709..07941b554519 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -47,202 +47,6 @@ unsigned long max_pfn_mapped;
 static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
 
 /*
- * Early reserved memory areas.
- */
-#define MAX_EARLY_RES 20
-
-struct early_res {
-	unsigned long start, end;
-	char name[16];
-};
-static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-	{ 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
-#ifdef CONFIG_X86_TRAMPOLINE
-	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
-#endif
-	{}
-};
-
-void __init reserve_early(unsigned long start, unsigned long end, char *name)
-{
-	int i;
-	struct early_res *r;
-	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-		r = &early_res[i];
-		if (end > r->start && start < r->end)
-			panic("Overlapping early reservations %lx-%lx %s to %lx-%lx %s\n",
-			      start, end - 1, name?name:"", r->start, r->end - 1, r->name);
-	}
-	if (i >= MAX_EARLY_RES)
-		panic("Too many early reservations");
-	r = &early_res[i];
-	r->start = start;
-	r->end = end;
-	if (name)
-		strncpy(r->name, name, sizeof(r->name) - 1);
-}
-
-void __init free_early(unsigned long start, unsigned long end)
-{
-	struct early_res *r;
-	int i, j;
-
-	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-		r = &early_res[i];
-		if (start == r->start && end == r->end)
-			break;
-	}
-	if (i >= MAX_EARLY_RES || !early_res[i].end)
-		panic("free_early on not reserved area: %lx-%lx!", start, end);
-
-	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
-		;
-
-	memmove(&early_res[i], &early_res[i + 1],
-		(j - 1 - i) * sizeof(struct early_res));
-
-	early_res[j - 1].end = 0;
-}
-
-void __init early_res_to_bootmem(unsigned long start, unsigned long end)
-{
-	int i;
-	unsigned long final_start, final_end;
-	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-		struct early_res *r = &early_res[i];
-		final_start = max(start, r->start);
-		final_end = min(end, r->end);
-		if (final_start >= final_end)
-			continue;
-		printk(KERN_INFO "  early res: %d [%lx-%lx] %s\n", i,
-			final_start, final_end - 1, r->name);
-		reserve_bootmem_generic(final_start, final_end - final_start);
-	}
-}
-
-/* Check for already reserved areas */
-static inline int __init
-bad_addr(unsigned long *addrp, unsigned long size, unsigned long align)
-{
-	int i;
-	unsigned long addr = *addrp, last;
-	int changed = 0;
-again:
-	last = addr + size;
-	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-		struct early_res *r = &early_res[i];
-		if (last >= r->start && addr < r->end) {
-			*addrp = addr = round_up(r->end, align);
-			changed = 1;
-			goto again;
-		}
-	}
-	return changed;
-}
-
-/* Check for already reserved areas */
-static inline int __init
-bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align)
-{
-	int i;
-	unsigned long addr = *addrp, last;
-	unsigned long size = *sizep;
-	int changed = 0;
-again:
-	last = addr + size;
-	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
-		struct early_res *r = &early_res[i];
-		if (last > r->start && addr < r->start) {
-			size = r->start - addr;
-			changed = 1;
-			goto again;
-		}
-		if (last > r->end && addr < r->end) {
-			addr = round_up(r->end, align);
-			size = last - addr;
-			changed = 1;
-			goto again;
-		}
-		if (last <= r->end && addr >= r->start) {
-			(*sizep)++;
-			return 0;
-		}
-	}
-	if (changed) {
-		*addrp = addr;
-		*sizep = size;
-	}
-	return changed;
-}
-
-/*
- * Find a free area with specified alignment in a specific range.
- */
-unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-				    unsigned long size, unsigned long align)
-{
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		unsigned long addr, last;
-		unsigned long ei_last;
-
-		if (ei->type != E820_RAM)
-			continue;
-		addr = round_up(ei->addr, align);
-		ei_last = ei->addr + ei->size;
-		if (addr < start)
-			addr = round_up(start, align);
-		if (addr >= ei_last)
-			continue;
-		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
-			;
-		last = addr + size;
-		if (last > ei_last)
-			continue;
-		if (last > end)
-			continue;
-		return addr;
-	}
-	return -1UL;
-}
-
-/*
- * Find next free range after *start
- */
-unsigned long __init find_e820_area_size(unsigned long start,
-					 unsigned long *sizep,
-					 unsigned long align)
-{
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		unsigned long addr, last;
-		unsigned long ei_last;
-
-		if (ei->type != E820_RAM)
-			continue;
-		addr = round_up(ei->addr, align);
-		ei_last = ei->addr + ei->size;
-		if (addr < start)
-			addr = round_up(start, align);
-		if (addr >= ei_last)
-			continue;
-		*sizep = ei_last - addr;
-		while (bad_addr_size(&addr, sizep, align) &&
-			addr + *sizep <= ei_last)
-			;
-		last = addr + *sizep;
-		if (last > ei_last)
-			continue;
-		return addr;
-	}
-	return -1UL;
-
-}
-/*
  * Find the highest page frame number we have available
  */
 unsigned long __init e820_end_of_ram(void)
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3db059058927..c216d3c2a991 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -8,7 +8,83 @@
 #include <linux/init.h>
 #include <linux/start_kernel.h>
 
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/e820.h>
+#include <asm/bios_ebda.h>
+
+#define BIOS_LOWMEM_KILOBYTES 0x413
+
+/*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too. This also contains a
+ * workaround for Dell systems that neglect to reserve EBDA.
+ * The same workaround also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
+ */
+static void __init reserve_ebda_region(void)
+{
+	unsigned int lowmem, ebda_addr;
+
+	/* To determine the position of the EBDA and the */
+	/* end of conventional memory, we need to look at */
+	/* the BIOS data area. In a paravirtual environment */
+	/* that area is absent. We'll just have to assume */
+	/* that the paravirt case can handle memory setup */
+	/* correctly, without our help. */
+	if (paravirt_enabled())
+		return;
+
+	/* end of low (conventional) memory */
+	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+	lowmem <<= 10;
+
+	/* start of EBDA area */
+	ebda_addr = get_bios_ebda();
+
+	/* Fixup: bios puts an EBDA in the top 64K segment */
+	/* of conventional memory, but does not adjust lowmem. */
+	if ((lowmem - ebda_addr) <= 0x10000)
+		lowmem = ebda_addr;
+
+	/* Fixup: bios does not report an EBDA at all. */
+	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
+	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+		lowmem = 0x9f000;
+
+	/* Paranoia: should never happen, but... */
+	if ((lowmem == 0) || (lowmem >= 0x100000))
+		lowmem = 0x9f000;
+
+	/* reserve all memory between lowmem and the 1MB mark */
+	reserve_early(lowmem, 0x100000, "BIOS reserved");
+}
+
 void __init i386_start_kernel(void)
 {
+	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* Reserve INITRD */
+	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
+		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+		u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
+	}
+#endif
+	reserve_early(__pa_symbol(&_end), init_pg_tables_end, "INIT_PG_TABLE");
+
+	reserve_ebda_region();
+
+	/*
+	 * At this point everything still needed from the boot loader
+	 * or BIOS or kernel text should be early reserved or marked not
+	 * RAM in e820. All other memory is free game.
+	 */
+
 	start_kernel();
 }
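On 32-bit, reserve_ebda_region() now runs from i386_start_kernel() and records the hole with reserve_early() instead of reserve_bootmem() (compare the setup_32.c hunk below, which drops the old copy). The lower bound it reserves follows three fixups; here is a small user-space sketch of just that arithmetic, not kernel code, with illustrative input values in place of the BIOS data area reads:

/* User-space sketch of the lowmem/EBDA fixups in reserve_ebda_region(). */
#include <stdio.h>

static unsigned int ebda_lowmem(unsigned int lowmem, unsigned int ebda_addr)
{
	/* BIOS put an EBDA in the top 64K but did not adjust lowmem */
	if ((lowmem - ebda_addr) <= 0x10000)
		lowmem = ebda_addr;
	/* no EBDA reported at all; some old Dells still need 4k */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
		lowmem = 0x9f000;
	/* paranoia: should never happen */
	if ((lowmem == 0) || (lowmem >= 0x100000))
		lowmem = 0x9f000;
	return lowmem;
}

int main(void)
{
	/* e.g. 639 KB of conventional memory, EBDA reported at 0x9fc00 */
	unsigned int lowmem = 639 * 1024;	/* 0x9fc00 */
	unsigned int start = ebda_lowmem(lowmem, 0x9fc00);

	printf("reserve_early(%#x, 0x100000, \"BIOS reserved\")\n", start);
	return 0;
}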
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 5faeab69edd9..fed482c62450 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -359,56 +359,6 @@ unsigned long __init find_max_low_pfn(void)
 	return max_low_pfn;
 }
 
-#define BIOS_LOWMEM_KILOBYTES 0x413
-
-/*
- * The BIOS places the EBDA/XBDA at the top of conventional
- * memory, and usually decreases the reported amount of
- * conventional memory (int 0x12) too. This also contains a
- * workaround for Dell systems that neglect to reserve EBDA.
- * The same workaround also avoids a problem with the AMD768MPX
- * chipset: reserve a page before VGA to prevent PCI prefetch
- * into it (errata #56). Usually the page is reserved anyways,
- * unless you have no PS/2 mouse plugged in.
- */
-static void __init reserve_ebda_region(void)
-{
-	unsigned int lowmem, ebda_addr;
-
-	/* To determine the position of the EBDA and the */
-	/* end of conventional memory, we need to look at */
-	/* the BIOS data area. In a paravirtual environment */
-	/* that area is absent. We'll just have to assume */
-	/* that the paravirt case can handle memory setup */
-	/* correctly, without our help. */
-	if (paravirt_enabled())
-		return;
-
-	/* end of low (conventional) memory */
-	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
-	lowmem <<= 10;
-
-	/* start of EBDA area */
-	ebda_addr = get_bios_ebda();
-
-	/* Fixup: bios puts an EBDA in the top 64K segment */
-	/* of conventional memory, but does not adjust lowmem. */
-	if ((lowmem - ebda_addr) <= 0x10000)
-		lowmem = ebda_addr;
-
-	/* Fixup: bios does not report an EBDA at all. */
-	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
-	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
-		lowmem = 0x9f000;
-
-	/* Paranoia: should never happen, but... */
-	if ((lowmem == 0) || (lowmem >= 0x100000))
-		lowmem = 0x9f000;
-
-	/* reserve all memory between lowmem and the 1MB mark */
-	reserve_bootmem(lowmem, 0x100000 - lowmem, BOOTMEM_DEFAULT);
-}
-
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 static void __init setup_bootmem_allocator(void);
 static unsigned long __init setup_memory(void)
@@ -522,25 +472,32 @@ static void __init reserve_initrd(void)
 	unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
 	unsigned long ramdisk_here;
 
-	initrd_start = 0;
-
 	if (!boot_params.hdr.type_of_loader ||
 	    !ramdisk_image || !ramdisk_size)
 		return;		/* No initrd provided by bootloader */
 
+	initrd_start = 0;
+
 	if (ramdisk_end < ramdisk_image) {
+		free_bootmem(ramdisk_image, ramdisk_size);
 		printk(KERN_ERR "initrd wraps around end of memory, "
 		       "disabling initrd\n");
 		return;
 	}
 	if (ramdisk_size >= end_of_lowmem/2) {
+		free_bootmem(ramdisk_image, ramdisk_size);
 		printk(KERN_ERR "initrd too large to handle, "
 		       "disabling initrd\n");
 		return;
 	}
+
 	if (ramdisk_end <= end_of_lowmem) {
 		/* All in lowmem, easy case */
-		reserve_bootmem(ramdisk_image, ramdisk_size, BOOTMEM_DEFAULT);
+		/*
+		 * don't need to reserve again, already reserved early
+		 * in i386_start_kernel, and early_res_to_bootmem
+		 * convert that to reserved in bootmem
+		 */
 		initrd_start = ramdisk_image + PAGE_OFFSET;
 		initrd_end = initrd_start+ramdisk_size;
 		return;
@@ -582,6 +539,8 @@ static void __init relocate_initrd(void)
 		p = (char *)__va(ramdisk_image);
 		memcpy(q, p, clen);
 		q += clen;
+		/* need to free these low pages...*/
+		free_bootmem(ramdisk_image, clen);
 		ramdisk_image += clen;
 		ramdisk_size -= clen;
 	}
@@ -600,47 +559,28 @@ static void __init relocate_initrd(void)
 		ramdisk_image += clen;
 		ramdisk_size -= clen;
 	}
+	/* high pages is not converted by early_res_to_bootmem */
 }
 
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 void __init setup_bootmem_allocator(void)
 {
-	unsigned long bootmap_size;
+	unsigned long bootmap_size, bootmap;
 	/*
 	 * Initialize the boot-time allocator (with low memory only):
 	 */
-	bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
-
+	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
+	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+				 max_low_pfn<<PAGE_SHIFT, bootmap_size,
+				 PAGE_SIZE);
+	if (bootmap == -1L)
+		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, max_low_pfn);
 	register_bootmem_low_pages(max_low_pfn);
+	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
+	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
 
-	/*
-	 * Reserve the bootmem bitmap itself as well. We do this in two
-	 * steps (first step was init_bootmem()) because this catches
-	 * the (very unlikely) case of us accidentally initializing the
-	 * bootmem allocator with an invalid RAM area.
-	 */
-	reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
-		 bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text),
-		 BOOTMEM_DEFAULT);
-
-	/*
-	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-	 * enabling clean reboots, SMP operation, laptop functions.
-	 */
-	reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
-
-	/* reserve EBDA region */
-	reserve_ebda_region();
-
-#ifdef CONFIG_SMP
-	/*
-	 * But first pinch a few for the stack/trampoline stuff
-	 * FIXME: Don't need the extra page at 4K, but need to fix
-	 * trampoline before removing it. (see the GDT stuff)
-	 */
-	reserve_bootmem(PAGE_SIZE, PAGE_SIZE, BOOTMEM_DEFAULT);
-#endif
 #ifdef CONFIG_ACPI_SLEEP
 	/*
 	 * Reserve low memory region for sleep support.
@@ -803,9 +743,6 @@ void __init setup_arch(char **cmdline_p)
 	 * not to exceed the 8Mb limit.
 	 */
 
-#ifdef CONFIG_SMP
-	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-#endif
 	paging_init();
 
 	/*
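With setup_bootmem_allocator() rewritten, the bootmem bitmap is no longer assumed to sit right after min_low_pfn: its size is computed first (one bit per low-memory page frame, rounded up to whole pages, which is what bootmem_bootmap_pages() << PAGE_SHIFT yields) and find_e820_area() then places it in low RAM that is not already early-reserved. A user-space sketch of the sizing arithmetic, assuming 4 KB pages and an illustrative 896 MB of lowmem:

/* User-space sketch of the bootmem bitmap sizing used above. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static unsigned long bootmem_bitmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;	/* one bit per page frame */

	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* page-align */
}

int main(void)
{
	unsigned long max_low_pfn = 896UL * 1024 * 1024 >> PAGE_SHIFT;

	printf("bitmap: %lu bytes for %lu pages\n",
	       bootmem_bitmap_bytes(max_low_pfn), max_low_pfn);
	return 0;
}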
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 38988491c622..843722e2b79e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -555,23 +555,6 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	/*
-	 * Has to be in very low memory so we can execute
-	 * real-mode AP code.
-	 */
-	if (__pa(trampoline_base) >= 0x9F000)
-		BUG();
-}
-#endif
-
 static void impress_friends(void)
 {
 	int cpu;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index abbf199adebb..1106fac6024d 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -2,7 +2,7 @@
 
 #include <asm/trampoline.h>
 
-/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
+/* ready for x86_64 and x86 */
 unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
 
 /*
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 8acbf0cdf1a5..7bbebbfe8c4e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1137,15 +1137,6 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
-/* used to set up the trampoline for other CPUs when the memory manager
- * is sorted out */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	if (__pa(trampoline_base) >= 0x93000)
-		BUG();
-}
-
 /* send a reschedule CPI to one CPU by physical CPU number*/
 static void voyager_smp_send_reschedule(int cpu)
 {