-rw-r--r--   arch/x86/Kconfig                      4
-rw-r--r--   arch/x86/boot/boot.h                  1
-rw-r--r--   arch/x86/boot/compressed/kaslr.c    140
-rw-r--r--   arch/x86/boot/string.c               13
-rw-r--r--   arch/x86/include/asm/e820.h           2
-rw-r--r--   arch/x86/include/asm/pgtable_32.h    32
-rw-r--r--   arch/x86/kernel/e820.c               22
-rw-r--r--   arch/x86/kernel/head32.c             62
-rw-r--r--   arch/x86/kernel/head_32.S           121
9 files changed, 261 insertions, 136 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7b6fd68b4715..f8fbfc5a98ba 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1994,10 +1994,6 @@ config RANDOMIZE_BASE
 	  theoretically possible, but the implementations are further
 	  limited due to memory layouts.
 
-	  If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
-	  time. To enable it, boot with "kaslr" on the kernel command
-	  line (which will also disable hibernation).
-
 	  If unsure, say N.
 
 # Relocation on x86 needs some additional build support
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index e5612f3e3b57..9b42b6d1e902 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -333,6 +333,7 @@ size_t strnlen(const char *s, size_t maxlen);
 unsigned int atou(const char *s);
 unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base);
 size_t strlen(const char *s);
+char *strchr(const char *s, int c);
 
 /* tty.c */
 void puts(const char *);
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index a66854d99ee1..8b7c9e75edcb 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -11,6 +11,7 @@
  */
 #include "misc.h"
 #include "error.h"
+#include "../boot.h"
 
 #include <generated/compile.h>
 #include <linux/module.h>
@@ -52,15 +53,22 @@ static unsigned long get_boot_seed(void)
 #include "../../lib/kaslr.c"
 
 struct mem_vector {
-	unsigned long start;
-	unsigned long size;
+	unsigned long long start;
+	unsigned long long size;
 };
 
+/* Only supporting at most 4 unusable memmap regions with kaslr */
+#define MAX_MEMMAP_REGIONS	4
+
+static bool memmap_too_large;
+
 enum mem_avoid_index {
 	MEM_AVOID_ZO_RANGE = 0,
 	MEM_AVOID_INITRD,
 	MEM_AVOID_CMDLINE,
 	MEM_AVOID_BOOTPARAMS,
+	MEM_AVOID_MEMMAP_BEGIN,
+	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
 	MEM_AVOID_MAX,
 };
 
@@ -77,6 +85,123 @@ static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
 	return true;
 }
 
+/**
+ * _memparse - Parse a string with mem suffixes into a number
+ * @ptr: Where parse begins
+ * @retptr: (output) Optional pointer to next char after parse completes
+ *
+ * Parses a string into a number. The number stored at @ptr is
+ * potentially suffixed with K, M, G, T, P, E.
+ */
+static unsigned long long _memparse(const char *ptr, char **retptr)
+{
+	char *endptr;	/* Local pointer to end of parsed string */
+
+	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
+	case 'E':
+	case 'e':
+		ret <<= 10;
+	case 'P':
+	case 'p':
+		ret <<= 10;
+	case 'T':
+	case 't':
+		ret <<= 10;
+	case 'G':
+	case 'g':
+		ret <<= 10;
+	case 'M':
+	case 'm':
+		ret <<= 10;
+	case 'K':
+	case 'k':
+		ret <<= 10;
+		endptr++;
+	default:
+		break;
+	}
+
+	if (retptr)
+		*retptr = endptr;
+
+	return ret;
+}
+
+static int
+parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
+{
+	char *oldp;
+
+	if (!p)
+		return -EINVAL;
+
+	/* We don't care about this option here */
+	if (!strncmp(p, "exactmap", 8))
+		return -EINVAL;
+
+	oldp = p;
+	*size = _memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	switch (*p) {
+	case '@':
+		/* Skip this region, usable */
+		*start = 0;
+		*size = 0;
+		return 0;
+	case '#':
+	case '$':
+	case '!':
+		*start = _memparse(p + 1, &p);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void mem_avoid_memmap(void)
+{
+	char arg[128];
+	int rc;
+	int i;
+	char *str;
+
+	/* See if we have any memmap areas */
+	rc = cmdline_find_option("memmap", arg, sizeof(arg));
+	if (rc <= 0)
+		return;
+
+	i = 0;
+	str = arg;
+	while (str && (i < MAX_MEMMAP_REGIONS)) {
+		int rc;
+		unsigned long long start, size;
+		char *k = strchr(str, ',');
+
+		if (k)
+			*k++ = 0;
+
+		rc = parse_memmap(str, &start, &size);
+		if (rc < 0)
+			break;
+		str = k;
+		/* A usable region that should not be skipped */
+		if (size == 0)
+			continue;
+
+		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
+		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
+		i++;
+	}
+
+	/* More than 4 memmaps, fail kaslr */
+	if ((i >= MAX_MEMMAP_REGIONS) && str)
+		memmap_too_large = true;
+}
+
 /*
  * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
  * The mem_avoid array is used to store the ranges that need to be avoided
@@ -197,6 +322,9 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 
 	/* We don't need to set a mapping for setup_data. */
 
+	/* Mark the memmap regions we need to avoid */
+	mem_avoid_memmap();
+
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
 	/* Make sure video RAM can be used. */
 	add_identity_map(0, PMD_SIZE);
@@ -379,6 +507,12 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
 	int i;
 	unsigned long addr;
 
+	/* Check if we had too many memmaps. */
+	if (memmap_too_large) {
+		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
+		return 0;
+	}
+
 	/* Make sure minimum is aligned. */
 	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
 
@@ -456,7 +590,7 @@ void choose_random_location(unsigned long input,
 	/* Walk e820 and find a random address. */
 	random_addr = find_random_phys_addr(min_addr, output_size);
 	if (!random_addr) {
-		warn("KASLR disabled: could not find suitable E820 region!");
+		warn("Physical KASLR disabled: no suitable memory region!");
 	} else {
 		/* Update the new physical address location. */
 		if (*output != random_addr) {
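Reviewer note (not part of the patch): the following standalone user-space sketch mirrors the fall-through suffix handling of _memparse() and the way parse_memmap() splits a "memmap=" argument into size and start. demo_memparse() and the sample string are made up for illustration; the kernel code above uses simple_strtoull() instead of the hosted strtoull().

#include <stdio.h>
#include <stdlib.h>

/* Hosted stand-in for _memparse(): number plus optional K/M/G/T/P/E suffix. */
static unsigned long long demo_memparse(const char *p, char **retp)
{
	char *end;
	unsigned long long ret = strtoull(p, &end, 0);

	switch (*end) {
	case 'E': case 'e': ret <<= 10; /* fall through */
	case 'P': case 'p': ret <<= 10; /* fall through */
	case 'T': case 't': ret <<= 10; /* fall through */
	case 'G': case 'g': ret <<= 10; /* fall through */
	case 'M': case 'm': ret <<= 10; /* fall through */
	case 'K': case 'k': ret <<= 10; end++;
	default: break;
	}
	if (retp)
		*retp = end;
	return ret;
}

int main(void)
{
	/* Assumed example argument: reserve 512M starting at 512M (type '$'). */
	char arg[] = "512M$0x20000000";
	char *p = arg;
	unsigned long long size = demo_memparse(p, &p);
	unsigned long long start = (*p == '$' || *p == '#' || *p == '!')
					? demo_memparse(p + 1, &p) : 0;

	printf("size=0x%llx start=0x%llx\n", size, start); /* 0x20000000 0x20000000 */
	return 0;
}

With a '$', '#' or '!' type the region is treated as unusable and recorded by mem_avoid_memmap(); an '@' region is usable, so parse_memmap() zeroes its size and the loop skips it.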
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 9e240fcba784..5457b02fc050 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -156,3 +156,16 @@ char *strstr(const char *s1, const char *s2)
 	}
 	return NULL;
 }
+
+/**
+ * strchr - Find the first occurrence of the character c in the string s.
+ * @s: the string to be searched
+ * @c: the character to search for
+ */
+char *strchr(const char *s, int c)
+{
+	while (*s != (char)c)
+		if (*s++ == '\0')
+			return NULL;
+	return (char *)s;
+}
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec23d8e1297c..67313f3a9874 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -30,8 +30,6 @@ extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
 			     int checktype);
 extern void update_e820(void);
 extern void e820_setup_gap(void);
-extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
-			   unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
 extern void parse_e820_ext(u64 phys_addr, u32 data_len);
 
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index b6c0b404898a..fbc73360aea0 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -27,6 +27,7 @@ struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
+extern pmd_t initial_pg_pmd[];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
@@ -75,4 +76,35 @@ do { \
 #define kern_addr_valid(kaddr)	(0)
 #endif
 
+/*
+ * This is how much memory in addition to the memory covered up to
+ * and including _end we need mapped initially.
+ * We need:
+ *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
+ *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
+ *
+ * Modulo rounding, each megabyte assigned here requires a kilobyte of
+ * memory, which is currently unreclaimed.
+ *
+ * This should be a multiple of a page.
+ *
+ * KERNEL_IMAGE_SIZE should be greater than pa(_end)
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+#if PTRS_PER_PMD > 1
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+#else
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+#endif
+
+/*
+ * Number of possible pages in the lowmem region.
+ *
+ * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
+ * gas warning about overflowing shift count when gas has been compiled
+ * with only a host target support using a 32-bit type for internal
+ * representation.
+ */
+#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
+
 #endif /* _ASM_X86_PGTABLE_32_H */
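As a quick cross-check of the comment being moved above (an editor's hosted sketch, not part of the patch), the arithmetic for the default 3G/1G split works out as follows; all constants are assumed (non-PAE, 4K pages), and 2ULL is used here only to avoid 32-bit overflow in hosted C, whereas the kernel macro relies on gas/unsigned arithmetic:

#include <stdio.h>

#define PAGE_SHIFT	12
#define __PAGE_OFFSET	0xC0000000UL		/* assumed 3G/1G VMSPLIT */
#define PTRS_PER_PGD	1024			/* non-PAE */
#define PAGE_TABLE_SIZE(pages)	((pages) / PTRS_PER_PGD)
#define LOWMEM_PAGES	(((2ULL << 31) - __PAGE_OFFSET) >> PAGE_SHIFT)

int main(void)
{
	/* 1G of lowmem => 262144 pages => 256 page tables => 1 MB of mappings. */
	printf("lowmem pages:       %llu\n", LOWMEM_PAGES);                  /* 262144 */
	printf("page-table pages:   %llu\n", PAGE_TABLE_SIZE(LOWMEM_PAGES)); /* 256 */
	printf("bytes beyond _end:  %llu\n",
	       PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);                /* 1048576 */
	return 0;
}

So a 1 GB lowmem window costs 256 extra page-table pages, roughly 1 KB of page tables per megabyte mapped, matching the "each megabyte assigned here requires a kilobyte" remark.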
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 90e8dde3ec26..b2bbad6ebe4d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -580,24 +580,19 @@ static void __init update_e820_saved(void)
 }
 #define MAX_GAP_END 0x100000000ull
 /*
- * Search for a gap in the e820 memory space from start_addr to end_addr.
+ * Search for a gap in the e820 memory space from 0 to MAX_GAP_END.
  */
-__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
-		unsigned long start_addr, unsigned long long end_addr)
+static int __init e820_search_gap(unsigned long *gapstart,
+		unsigned long *gapsize)
 {
-	unsigned long long last;
+	unsigned long long last = MAX_GAP_END;
 	int i = e820->nr_map;
 	int found = 0;
 
-	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;
-
 	while (--i >= 0) {
 		unsigned long long start = e820->map[i].addr;
 		unsigned long long end = start + e820->map[i].size;
 
-		if (end < start_addr)
-			continue;
-
 		/*
 		 * Since "last" is at most 4GB, we know we'll
 		 * fit in 32 bits if this condition is true
@@ -628,18 +623,19 @@ __init void e820_setup_gap(void)
 	unsigned long gapstart, gapsize;
 	int found;
 
-	gapstart = 0x10000000;
 	gapsize = 0x400000;
-	found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);
+	found = e820_search_gap(&gapstart, &gapsize);
 
-#ifdef CONFIG_X86_64
 	if (!found) {
+#ifdef CONFIG_X86_64
 		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
 		printk(KERN_ERR
 			"e820: cannot find a gap in the 32bit address range\n"
 			"e820: PCI devices with unassigned 32bit BARs may break!\n");
-	}
+#else
+		gapstart = 0x10000000;
 #endif
+	}
 
 	/*
 	 * e820_reserve_resources_late protect stolen RAM already
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index f16c55bfc090..e5fb436a6548 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -49,3 +49,65 @@ asmlinkage __visible void __init i386_start_kernel(void)
 
 	start_kernel();
 }
+
+/*
+ * Initialize page tables.  This creates a PDE and a set of page
+ * tables, which are located immediately beyond __brk_base.  The variable
+ * _brk_end is set up to point to the first "safe" location.
+ * Mappings are created both at virtual address 0 (identity mapping)
+ * and PAGE_OFFSET for up to _end.
+ *
+ * In PAE mode initial_page_table is statically defined to contain
+ * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+ * entries). The identity mapping is handled by pointing two PGD entries
+ * to the first kernel PMD. Note the upper half of each PMD or PTE are
+ * always zero at this stage.
+ */
+void __init mk_early_pgtbl_32(void)
+{
+#ifdef __pa
+#undef __pa
+#endif
+#define __pa(x)  ((unsigned long)(x) - PAGE_OFFSET)
+	pte_t pte, *ptep;
+	int i;
+	unsigned long *ptr;
+	/* Enough space to fit pagetables for the low memory linear map */
+	const unsigned long limit = __pa(_end) +
+		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+#ifdef CONFIG_X86_PAE
+	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
+#define SET_PL2(pl2, val)    { (pl2).pmd = (val); }
+#else
+	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
+#define SET_PL2(pl2, val)   { (pl2).pgd = (val); }
+#endif
+
+	ptep = (pte_t *)__pa(__brk_base);
+	pte.pte = PTE_IDENT_ATTR;
+
+	while ((pte.pte & PTE_PFN_MASK) < limit) {
+
+		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
+		*pl2p = pl2;
+#ifndef CONFIG_X86_PAE
+		/* Kernel PDE entry */
+		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+#endif
+		for (i = 0; i < PTRS_PER_PTE; i++) {
+			*ptep = pte;
+			pte.pte += PAGE_SIZE;
+			ptep++;
+		}
+
+		pl2p++;
+	}
+
+	ptr = (unsigned long *)__pa(&max_pfn_mapped);
+	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
+	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+
+	ptr = (unsigned long *)__pa(&_brk_end);
+	*ptr = (unsigned long)ptep + PAGE_OFFSET;
+}
+
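To make the loop's end condition above concrete, here is a rough hosted model (editor's sketch, not from the patch) of how many PTE pages mk_early_pgtbl_32() takes from __brk_base in the non-PAE case; the 28 MB __pa(_end) figure and the other constants are assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PTE	1024
#define PTRS_PER_PGD	1024
#define __PAGE_OFFSET	0xC0000000UL
#define PAGE_TABLE_SIZE(pages)	((pages) / PTRS_PER_PGD)
#define LOWMEM_PAGES	(((2ULL << 31) - __PAGE_OFFSET) >> PAGE_SHIFT)

int main(void)
{
	unsigned long pa_end = 0x01c00000;	/* assumed __pa(_end): 28 MB image */
	unsigned long long limit = pa_end +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
	unsigned long long covered = 0;
	unsigned int pte_pages = 0;

	/* Each loop iteration fills one 4 KB page of PTEs covering 4 MB. */
	while (covered < limit) {
		covered += (unsigned long long)PTRS_PER_PTE * PAGE_SIZE;
		pte_pages++;
	}
	printf("PTE pages taken from __brk_base: %u (%llu MB mapped)\n",
	       pte_pages, covered >> 20);	/* 8 pages, 32 MB */
	return 0;
}

Each iteration publishes one PDE (plus the PAGE_OFFSET alias in the non-PAE case) and maps 4 MB, so the example stops after 8 pages once 32 MB covers the limit; max_pfn_mapped and _brk_end are then derived from the final pte and ptep values.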
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 4e8577d03372..1f85ee8f9439 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -24,6 +24,7 @@
 #include <asm/nops.h>
 #include <asm/bootparam.h>
 #include <asm/export.h>
+#include <asm/pgtable_32.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -41,44 +42,10 @@
 #define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
 #define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
 
-/*
- * This is how much memory in addition to the memory covered up to
- * and including _end we need mapped initially.
- * We need:
- *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
- *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
- *
- * Modulo rounding, each megabyte assigned here requires a kilobyte of
- * memory, which is currently unreclaimed.
- *
- * This should be a multiple of a page.
- *
- * KERNEL_IMAGE_SIZE should be greater than pa(_end)
- * and small than max_low_pfn, otherwise will waste some page table entries
- */
-
-#if PTRS_PER_PMD > 1
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
-#else
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
-#endif
 
 #define SIZEOF_PTREGS 17*4
 
 /*
- * Number of possible pages in the lowmem region.
- *
- * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
- * gas warning about overflowing shift count when gas has been compiled
- * with only a host target support using a 32-bit type for internal
- * representation.
- */
-LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
-
-/* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
-
-/*
  * Worst-case size of the kernel mapping we need to make:
  * a relocatable kernel can live anywhere in lowmem, so we need to be able
  * to map all of lowmem.
@@ -160,90 +127,15 @@ ENTRY(startup_32)
 	call load_ucode_bsp
 #endif
 
-/*
- * Initialize page tables.  This creates a PDE and a set of page
- * tables, which are located immediately beyond __brk_base.  The variable
- * _brk_end is set up to point to the first "safe" location.
- * Mappings are created both at virtual address 0 (identity mapping)
- * and PAGE_OFFSET for up to _end.
- */
-#ifdef CONFIG_X86_PAE
-
-	/*
-	 * In PAE mode initial_page_table is statically defined to contain
-	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
-	 * entries). The identity mapping is handled by pointing two PGD entries
-	 * to the first kernel PMD.
-	 *
-	 * Note the upper half of each PMD or PTE are always zero at this stage.
-	 */
-
-#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
-
-	xorl %ebx,%ebx				/* %ebx is kept at zero */
-
-	movl $pa(__brk_base), %edi
-	movl $pa(initial_pg_pmd), %edx
-	movl $PTE_IDENT_ATTR, %eax
-10:
-	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
-	movl %ecx,(%edx)			/* Store PMD entry */
-						/* Upper half already zero */
-	addl $8,%edx
-	movl $512,%ecx
-11:
-	stosl
-	xchgl %eax,%ebx
-	stosl
-	xchgl %eax,%ebx
-	addl $0x1000,%eax
-	loop 11b
-
-	/*
-	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
-	 */
-	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-	cmpl %ebp,%eax
-	jb 10b
-1:
-	addl $__PAGE_OFFSET, %edi
-	movl %edi, pa(_brk_end)
-	shrl $12, %eax
-	movl %eax, pa(max_pfn_mapped)
+	/* Create early pagetables. */
+	call mk_early_pgtbl_32
 
 	/* Do early initialization of the fixmap area */
 	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+#ifdef CONFIG_X86_PAE
+#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
 	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
-#else	/* Not PAE */
-
-page_pde_offset = (__PAGE_OFFSET >> 20);
-
-	movl $pa(__brk_base), %edi
-	movl $pa(initial_page_table), %edx
-	movl $PTE_IDENT_ATTR, %eax
-10:
-	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
-	movl %ecx,(%edx)			/* Store identity PDE entry */
-	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
-	addl $4,%edx
-	movl $1024, %ecx
-11:
-	stosl
-	addl $0x1000,%eax
-	loop 11b
-	/*
-	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
-	 */
-	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-	cmpl %ebp,%eax
-	jb 10b
-	addl $__PAGE_OFFSET, %edi
-	movl %edi, pa(_brk_end)
-	shrl $12, %eax
-	movl %eax, pa(max_pfn_mapped)
-
-	/* Do early initialization of the fixmap area */
-	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+#else
 	movl %eax,pa(initial_page_table+0xffc)
 #endif
 
@@ -666,6 +558,7 @@ ENTRY(setup_once_ref)
 __PAGE_ALIGNED_BSS
 	.align PAGE_SIZE
 #ifdef CONFIG_X86_PAE
+.globl initial_pg_pmd
 initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
 #else