author		Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:10 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:10 -0500
commit		8550eb99821b3f78cddfd19964f30e8bc4e429e0 (patch)
tree		0384ed8bb2aefead53201ab5499f3df15e21a10f /arch
parent		10f22dde556d1ed41d55355d1fb8ad495f9810c8 (diff)
x86: arch/x86/mm/init_32.c cleanup
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/init_32.c	240
1 file changed, 122 insertions(+), 118 deletions(-)
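
The bulk of the patch is mechanical style cleanup: open-coded BUG() checks
collapse into BUG_ON(), the noinline qualifier moves in front of the return
type, redundant "= 0" initializers on statics and globals are dropped, and
over-long lines are wrapped. A distilled before/after sketch of those
transforms as a standalone userspace program (the BUG_ON() stand-in and the
do_check() example are illustrative, not lines from the patch):

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's BUG_ON(): trip when cond is true. */
#define BUG_ON(cond)	assert(!(cond))

/* 'noinline' spelled before the return type, as the patch standardizes;
 * the open-coded 'if (x != y) BUG();' becomes a single BUG_ON(). */
static __attribute__((noinline)) int do_check(int table, int expected)
{
	BUG_ON(table != expected);	/* was: if (table != expected) BUG(); */
	return 0;
}

/* Explicit '= 0' dropped: statics and globals are zero-initialized by C. */
static int nx_enabled;			/* was: int nx_enabled = 0; */

int main(void)
{
	printf("nx_enabled defaults to %d\n", nx_enabled);
	return do_check(42, 42);
}
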
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8ed5c189d7aa..c6975fc6944a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -39,6 +39,7 @@
 #include <asm/fixmap.h>
 #include <asm/e820.h>
 #include <asm/apic.h>
+#include <asm/bugs.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
@@ -50,7 +51,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20;
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
 
-static int noinline do_test_wp_bit(void);
+static noinline int do_test_wp_bit(void);
 
 /*
  * Creates a middle page table and puts a pointer to it in the
@@ -61,7 +62,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 {
 	pud_t *pud;
 	pmd_t *pmd_table;
-	
+
 #ifdef CONFIG_X86_PAE
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -69,18 +70,18 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 	pud = pud_offset(pgd, 0);
-	if (pmd_table != pmd_offset(pud, 0))
-		BUG();
+	BUG_ON(pmd_table != pmd_offset(pud, 0));
 	}
 #endif
 	pud = pud_offset(pgd, 0);
 	pmd_table = pmd_offset(pud, 0);
+
 	return pmd_table;
 }
 
 /*
  * Create a page table and place a pointer to it in a middle page
- * directory entry.
+ * directory entry:
  */
 static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
@@ -90,9 +91,10 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
-	if (!page_table)
+	if (!page_table) {
 		page_table =
 			(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+	}
 
 	paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 	set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
@@ -103,22 +105,21 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 }
 
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
  * the given range.
- */
-
-/*
- * NOTE: The pagetables are allocated contiguous on the physical space
- * so we can cache the place of the first one and move around without
+ *
+ * NOTE: The pagetables are allocated contiguous on the physical space
+ * so we can cache the place of the first one and move around without
  * checking the pgd every time.
  */
-static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+static void __init
+page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
 	int pgd_idx, pmd_idx;
 	unsigned long vaddr;
+	pgd_t *pgd;
+	pmd_t *pmd;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -128,7 +129,8 @@ static void __init page_table_range_init (unsigned long start, unsigned long end
 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
 		pmd = one_md_table_init(pgd);
 		pmd = pmd + pmd_index(vaddr);
-		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+							pmd++, pmd_idx++) {
 			one_page_table_init(pmd);
 
 			vaddr += PMD_SIZE;
@@ -145,17 +147,17 @@ static inline int is_kernel_text(unsigned long addr)
 }
 
 /*
  * This maps the physical memory to kernel virtual address space, a total
  * of max_low_pfn pages, by creating page tables starting from address
- * PAGE_OFFSET.
+ * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int pgd_idx, pmd_idx, pte_ofs;
 
 	pgd_idx = pgd_index(PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
@@ -165,40 +167,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		pmd = one_md_table_init(pgd);
 		if (pfn >= max_low_pfn)
 			continue;
+
 		for (pmd_idx = 0;
 		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
 		     pmd++, pmd_idx++) {
-			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
+			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
-			/* Map with big pages if possible, otherwise
-			   create normal page tables. */
+			/*
+			 * Map with big pages if possible, otherwise
+			 * create normal page tables:
+			 */
 			if (cpu_has_pse) {
-				unsigned int address2;
+				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
-				address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
+				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
 					PAGE_OFFSET + PAGE_SIZE-1;
 
-				if (is_kernel_text(address) ||
-				    is_kernel_text(address2))
+				if (is_kernel_text(addr) ||
+				    is_kernel_text(addr2))
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
-			} else {
-				pte = one_page_table_init(pmd);
+				continue;
+			}
+			pte = one_page_table_init(pmd);
 
-				for (pte_ofs = 0;
-				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
-				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
-					pgprot_t prot = PAGE_KERNEL;
+			for (pte_ofs = 0;
+			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
+			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
+				pgprot_t prot = PAGE_KERNEL;
 
-					if (is_kernel_text(address))
-						prot = PAGE_KERNEL_EXEC;
+				if (is_kernel_text(addr))
+					prot = PAGE_KERNEL_EXEC;
 
-					set_pte(pte, pfn_pte(pfn, prot));
-				}
+				set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
 	}
@@ -215,14 +220,19 @@ static inline int page_kills_ppro(unsigned long pagenr)
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
-#define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
+static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr), vaddr);
+}
 
 static void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
 
-	/* cache the first kmap pte */
+	/*
+	 * Cache the first kmap pte:
+	 */
 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 
@@ -231,11 +241,11 @@ static void __init kmap_init(void)
 
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
+	unsigned long vaddr;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned long vaddr;
 
 	vaddr = PKMAP_BASE;
 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
@@ -244,7 +254,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pud = pud_offset(pgd, vaddr);
 	pmd = pmd_offset(pud, vaddr);
 	pte = pte_offset_kernel(pmd, vaddr);
-	pkmap_page_table = pte;	
+	pkmap_page_table = pte;
 }
 
 static void __meminit free_new_highpage(struct page *page)
@@ -263,7 +273,8 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 	SetPageReserved(page);
 }
 
-static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+static int __meminit
+add_one_highpage_hotplug(struct page *page, unsigned long pfn)
 {
 	free_new_highpage(page);
 	totalram_pages++;
@@ -271,6 +282,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
 	max_mapnr = max(pfn, max_mapnr);
 #endif
 	num_physpages++;
+
 	return 0;
 }
 
@@ -278,7 +290,7 @@ static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long p
  * Not currently handling the NUMA case.
  * Assuming single node and all memory that
  * has been added dynamically that would be
- * onlined here is in HIGHMEM
+ * onlined here is in HIGHMEM.
  */
 void __meminit online_page(struct page *page)
 {
@@ -286,13 +298,11 @@ void __meminit online_page(struct page *page)
 	add_one_highpage_hotplug(page, page_to_pfn(page));
 }
 
-
-#ifdef CONFIG_NUMA
-extern void set_highmem_pages_init(int);
-#else
+#ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
 	int pfn;
+
 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
 		/*
 		 * Holes under sparsemem might not have no mem_map[]:
@@ -302,23 +312,18 @@ static void __init set_highmem_pages_init(int bad_ppro)
 	}
 	totalram_pages += totalhigh_pages;
 }
-#endif /* CONFIG_FLATMEM */
+#endif /* !CONFIG_NUMA */
 
 #else
-#define kmap_init() do { } while (0)
-#define permanent_kmaps_init(pgd_base) do { } while (0)
-#define set_highmem_pages_init(bad_ppro) do { } while (0)
+# define kmap_init() do { } while (0)
+# define permanent_kmaps_init(pgd_base) do { } while (0)
+# define set_highmem_pages_init(bad_ppro) do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
 EXPORT_SYMBOL(__PAGE_KERNEL);
-pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
-#ifdef CONFIG_NUMA
-extern void __init remap_numa_kva(void);
-#else
-#define remap_numa_kva() do {} while (0)
-#endif
+pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
 void __init native_pagetable_setup_start(pgd_t *base)
 {
@@ -382,10 +387,10 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init pagetable_init (void)
+static void __init pagetable_init(void)
 {
-	unsigned long vaddr, end;
 	pgd_t *pgd_base = swapper_pg_dir;
+	unsigned long vaddr, end;
 
 	paravirt_pagetable_setup_start(pgd_base);
 
@@ -424,7 +429,7 @@ static void __init pagetable_init (void)
  * driver might have split up a kernel 4MB mapping.
  */
 char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-	__attribute__ ((aligned (PAGE_SIZE)));
+	__attribute__ ((aligned(PAGE_SIZE)));
 
 static inline void save_pg_dir(void)
 {
@@ -436,7 +441,7 @@ static inline void save_pg_dir(void)
 }
 #endif
 
-void zap_low_mappings (void)
+void zap_low_mappings(void)
 {
 	int i;
 
@@ -448,23 +453,24 @@ void zap_low_mappings (void)
 	 * Note that "pgd_clear()" doesn't do it for
 	 * us, because pgd_clear() is a no-op on i386.
 	 */
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
 #ifdef CONFIG_X86_PAE
 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
 #else
 		set_pgd(swapper_pg_dir+i, __pgd(0));
 #endif
+	}
 	flush_tlb_all();
 }
 
-int nx_enabled = 0;
+int nx_enabled;
 
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
 
-static int disable_nx __initdata = 0;
+static int disable_nx __initdata;
 
 /*
  * noexec = on|off
@@ -481,11 +487,14 @@ static int __init noexec_setup(char *str)
 		__supported_pte_mask |= _PAGE_NX;
 		disable_nx = 0;
 	}
-	} else if (!strcmp(str,"off")) {
-		disable_nx = 1;
-		__supported_pte_mask &= ~_PAGE_NX;
-	} else
-		return -EINVAL;
+	} else {
+		if (!strcmp(str, "off")) {
+			disable_nx = 1;
+			__supported_pte_mask &= ~_PAGE_NX;
+		} else {
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
@@ -497,6 +506,7 @@ static void __init set_nx(void)
 
 	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
 		if ((v[3] & (1 << 20)) && !disable_nx) {
 			rdmsr(MSR_EFER, l, h);
 			l |= EFER_NX;
@@ -506,7 +516,6 @@ static void __init set_nx(void)
 		}
 	}
 }
-
 #endif
 
 /*
@@ -523,7 +532,6 @@ void __init paging_init(void)
 	if (nx_enabled)
 		printk("NX (Execute Disable) protection: active\n");
 #endif
-
 	pagetable_init();
 
 	load_cr3(swapper_pg_dir);
@@ -547,7 +555,6 @@ void __init paging_init(void)
  * used to involve black magic jumps to work around some nasty CPU bugs,
  * but fortunately the switch to using exceptions got rid of all that.
  */
-
 static void __init test_wp_bit(void)
 {
 	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
@@ -567,19 +574,16 @@ static void __init test_wp_bit(void)
 	}
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern int ppro_with_ram_bug(void);
 	int codesize, reservedpages, datasize, initsize;
-	int tmp;
-	int bad_ppro;
+	int tmp, bad_ppro;
 
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
 #endif
-
 	bad_ppro = ppro_with_ram_bug();
 
 #ifdef CONFIG_HIGHMEM
@@ -591,14 +595,13 @@ void __init mem_init(void)
 		BUG();
 	}
 #endif
-
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
 	reservedpages = 0;
 	for (tmp = 0; tmp < max_low_pfn; tmp++)
 		/*
-		 * Only count reserved RAM pages
+		 * Only count reserved RAM pages:
 		 */
 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
@@ -609,11 +612,12 @@ void __init mem_init(void)
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
 	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
 	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 		   VMALLOC_END-VMALLOC_START);
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+		"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
@@ -625,44 +629,45 @@ void __init mem_init(void)
 
 #if 1 /* double-sanity-check paranoia */
 	printk("virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
 		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
 		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
 		FIXADDR_START, FIXADDR_TOP,
 		(FIXADDR_TOP - FIXADDR_START) >> 10,
 
 #ifdef CONFIG_HIGHMEM
 		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
 		(LAST_PKMAP*PAGE_SIZE) >> 10,
 #endif
 
 		VMALLOC_START, VMALLOC_END,
 		(VMALLOC_END - VMALLOC_START) >> 20,
 
 		(unsigned long)__va(0), (unsigned long)high_memory,
 		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
 
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
-		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
 
 		(unsigned long)&_etext, (unsigned long)&_edata,
 		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
 
 		(unsigned long)&_text, (unsigned long)&_etext,
 		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
 #ifdef CONFIG_HIGHMEM
-	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
 	BUG_ON(VMALLOC_END > PKMAP_BASE);
 #endif
 	BUG_ON(VMALLOC_START > VMALLOC_END);
 	BUG_ON((unsigned long)high_memory > VMALLOC_START);
 #endif /* double-sanity-check paranoia */
 
 #ifdef CONFIG_X86_PAE
@@ -693,45 +698,45 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	return __add_pages(zone, start_pfn, nr_pages);
 }
-
 #endif
 
 struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
-	if (PTRS_PER_PMD > 1)
+	if (PTRS_PER_PMD > 1) {
 		pmd_cache = kmem_cache_create("pmd",
 					PTRS_PER_PMD*sizeof(pmd_t),
 					PTRS_PER_PMD*sizeof(pmd_t),
 					SLAB_PANIC,
 					pmd_ctor);
+	}
 }
 
 /*
  * This function cannot be __init, since exceptions don't work in that
  * section. Put this after the callers, so that it cannot be inlined.
  */
-static int noinline do_test_wp_bit(void)
+static noinline int do_test_wp_bit(void)
 {
 	char tmp_reg;
 	int flag;
 
 	__asm__ __volatile__(
-		"	movb %0,%1	\n"
-		"1:	movb %1,%0	\n"
-		"	xorl %2,%2	\n"
+		"	movb %0, %1	\n"
+		"1:	movb %1, %0	\n"
+		"	xorl %2, %2	\n"
 		"2:			\n"
-		".section __ex_table,\"a\"\n"
+		".section __ex_table, \"a\"\n"
 		"	.align 4	\n"
-		"	.long 1b,2b	\n"
+		"	.long 1b, 2b	\n"
 		".previous		\n"
 		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
 		 "=q" (tmp_reg),
 		 "=r" (flag)
 		:"2" (1)
 		:"memory");
 
 	return flag;
 }
 
@@ -824,4 +829,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	free_init_pages("initrd memory", start, end);
 }
 #endif
-
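
One transform above deserves a note: kmap_get_fixmap_pte() turns from a
#define into a static inline, which gives the argument a real type and
guarantees it is evaluated exactly once. A minimal standalone illustration
of that difference (the square()/next() helpers are hypothetical, not
kernel code):

#include <stdio.h>

static int calls;

static int next(void)
{
	return ++calls;
}

/* The macro pastes its argument twice, so side effects run twice. */
#define square_macro(x)	((x) * (x))

/* The inline function type-checks its argument and evaluates it
 * exactly once, which is why the patch prefers this form. */
static inline int square_inline(int x)
{
	return x * x;
}

int main(void)
{
	calls = 0;
	int m = square_macro(next());	/* next() runs twice: 1 * 2 */
	printf("macro:  %d (next() ran %d times)\n", m, calls);

	calls = 0;
	int i = square_inline(next());	/* next() runs once: 1 * 1 */
	printf("inline: %d (next() ran %d times)\n", i, calls);
	return 0;
}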