Diffstat (limited to 'arch/i386/mm/init.c')
 -rw-r--r--   arch/i386/mm/init.c   696
 1 file changed, 696 insertions, 0 deletions
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
new file mode 100644
index 000000000000..7a7ea3737265
--- /dev/null
+++ b/arch/i386/mm/init.c
@@ -0,0 +1,696 @@
/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

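				/*
				 * address/address2 are the first and last byte the
				 * big-page mapping would cover; keep it executable if
				 * any of that range overlaps kernel text.
				 */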
				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}

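/*
 * Pages in this pfn window (physical 0x70000000-0x7003ffff) trip the
 * Pentium Pro RAM erratum checked for by ppro_with_ram_bug(), so they
 * are kept reserved instead of being handed to the allocator.
 */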
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

static inline int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;

		for (i = 0; i < memmap.nr_map; i++) {
			md = &memmap.map[i];
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
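/*
 * Walk the (possibly folded) pgd/pud/pmd levels to reach the pte that
 * maps a fixmap virtual address.
 */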
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);
		__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}

#ifndef CONFIG_DISCONTIGMEM
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#else
extern void set_highmem_pages_init(int);
#endif /* !CONFIG_DISCONTIGMEM */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifndef CONFIG_DISCONTIGMEM
#define remap_numa_kva() do {} while (0)
#else
extern void __init remap_numa_kva(void);
#endif

static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
#endif
}

#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings (void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
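		/*
		 * With PAE the top-level entries stay present but point at
		 * empty_zero_page (the low bit is _PAGE_PRESENT), mirroring
		 * how pagetable_init() seeded them.
		 */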
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on",2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str,"off",3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
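		/* EDX bit 20 of cpuid leaf 0x80000001 advertises NX (Execute Disable). */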
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

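	/*
	 * The NX bit lives in the high half of the 64-bit PAE pte, so flip
	 * bit (_PAGE_BIT_NX - 32) of pte_high and flush the TLB.
	 */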
	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

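	/*
	 * Write back to the read-only FIX_WP_TEST page.  If the CPU honours
	 * WP in supervisor mode the write faults and the exception-table
	 * entry skips the xorl, leaving flag at 1; otherwise the write
	 * succeeds and flag is cleared to 0.
	 */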
	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif