author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/mm/mmu.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	272
1 file changed, 118 insertions(+), 154 deletions(-)

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e8ed9dc461fe..594d677b92c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 #include <linux/fs.h>
 
 #include <asm/cputype.h>
@@ -25,14 +24,13 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include "mm.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -63,7 +61,7 @@ struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
 	unsigned int	pmd;
-	unsigned int	pte;
+	pteval_t	pte;
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
@@ -191,7 +189,7 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
@@ -236,19 +234,18 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_EXEC,
+				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -259,23 +256,22 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG |
-				L_PTE_DIRTY | L_PTE_WRITE,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_IO,
+		.domain    = DOMAIN_KERNEL,
 	},
 };
 
@@ -310,9 +306,8 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
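
This hunk, like the two build_mem_type_table() hunks that follow, replaces a compile-time #ifdef CONFIG_SMP block with a runtime is_smp() test, so a single kernel image can pick the right cache policy on both UP and SMP hardware. A minimal userspace sketch of the pattern follows; the stubbed is_smp() is an assumption standing in for the kernel helper, which probes the CPU at boot.

#include <stdbool.h>
#include <stdio.h>

enum cachepolicy { CPOLICY_WRITETHROUGH, CPOLICY_WRITEBACK, CPOLICY_WRITEALLOC };

/* Stub: the real kernel helper detects SMP from CPU ID registers. */
static bool is_smp(void) { return true; }

int main(void)
{
	enum cachepolicy cachepolicy = CPOLICY_WRITEBACK;

	/* Run-time choice where #ifdef CONFIG_SMP froze it at build time. */
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	printf("cache policy: %d\n", cachepolicy);
	return 0;
}
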
@@ -406,13 +401,11 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -436,22 +429,23 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 	mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	user_pgprot |= L_PTE_SHARED;
-	kern_pgprot |= L_PTE_SHARED;
-	vecs_pgprot |= L_PTE_SHARED;
-	mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-	mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-	mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-	mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-	mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-	mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-	mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+	if (is_smp()) {
+		/*
+		 * Mark memory with the "shared" attribute
+		 * for SMP systems
+		 */
+		user_pgprot |= L_PTE_SHARED;
+		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 }
 
 /*
@@ -482,7 +476,7 @@ static void __init build_mem_type_table(void)
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);
+				 L_PTE_DIRTY | kern_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -537,8 +531,8 @@ static void __init *early_alloc(unsigned long sz)
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | prot);
+		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
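
The replaced allocation size, 2 * PTRS_PER_PTE * sizeof(pte_t), and the new PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE describe the same 4 KiB page: on classic ARM each PTE page carries 512 Linux-view entries followed by 512 hardware entries. A sketch of the arithmetic, with the constants written out under that assumption (the kernel defines them in its pgtable headers):

#include <stdio.h>

/* Assumed classic (non-LPAE) ARM values, for illustration only. */
#define PTRS_PER_PTE	 512
#define PTE_HWTABLE_PTRS PTRS_PER_PTE
#define PTE_HWTABLE_OFF	 (PTE_HWTABLE_PTRS * 4)	/* Linux PTEs come first */
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * 4)	/* u32 hardware entries  */

int main(void)
{
	/* Both expressions cover one full 4096-byte page-table page. */
	printf("old: %d bytes, new: %d bytes\n",
	       2 * PTRS_PER_PTE * 4,
	       PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return 0;
}
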
@@ -555,11 +549,11 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
-				      unsigned long end, unsigned long phys,
+static void __init alloc_init_section(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
 
 	/*
 	 * Try a section mapping - end, addr and phys must all be aligned
@@ -588,20 +582,34 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
 	}
 }
 
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+	unsigned long phys, const struct mem_type *type)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		alloc_init_section(pud, addr, next, phys, type);
+		phys += next - addr;
+	} while (pud++, addr = next, addr != end);
+}
+
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	pgd_t *pgd;
 
 	addr = md->virtual;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 		printk(KERN_ERR "MM: CPU does not support supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
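
The new alloc_init_pud() threads the pud level into the walk so create_mapping() matches the generic pgd/pud/pmd/pte layout; on ARM the pud is folded, so pud_offset() effectively returns the pgd entry and the loop runs once per pgd slot. A self-contained sketch of the same iterate-to-boundary idiom, with an illustrative 1 MiB PUD_SIZE and a stub body in place of alloc_init_section():

#include <stdio.h>

#define PUD_SIZE (1UL << 20)			/* illustrative size only */
#define PUD_MASK (~(PUD_SIZE - 1))

/* Round addr up to the next pud boundary, clamped to end. */
static unsigned long pud_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PUD_SIZE) & PUD_MASK;
	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xc0000000UL, end = 0xc0280000UL;
	unsigned long phys = 0x80000000UL, next;

	do {
		next = pud_addr_end(addr, end);
		/* here the kernel would call alloc_init_section(pud, ...) */
		printf("map virt %#lx..%#lx -> phys %#lx\n", addr, next, phys);
		phys += next - addr;
	} while (addr = next, addr != end);
	return 0;
}
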
@@ -614,14 +622,14 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	if (type->domain) {
 		printk(KERN_ERR "MM: invalid domain in supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for "
-		       "0x%08llx at 0x%08lx invalid alignment\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
+		       " at 0x%08lx invalid alignment\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
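
The printk changes in this and the create_mapping() hunks below pair %08llx with an explicit (long long) cast: phys_addr_t is 32-bit on classic ARM but 64-bit with LPAE, and the cast keeps the varargs promotion matched to the format either way. A userspace sketch of the convention, with phys_addr_t assumed 64-bit here:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* may be 32- or 64-bit in the kernel */

int main(void)
{
	phys_addr_t phys = 0x100000000ULL;	/* above 4 GiB, needs 64 bits */

	/* Cast to a known width so the format is correct either way. */
	printf("mapping for 0x%08llx\n", (unsigned long long)phys);
	return 0;
}
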
@@ -634,7 +642,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	pgd = pgd_offset_k(addr);
 	end = addr + length;
 	do {
-		pmd_t *pmd = pmd_offset(pgd, addr);
+		pud_t *pud = pud_offset(pgd, addr);
+		pmd_t *pmd = pmd_offset(pud, addr);
 		int i;
 
 		for (i = 0; i < 16; i++)
@@ -655,22 +664,23 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 static void __init create_mapping(struct map_desc *md)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for "
-		       "0x%08llx at 0x%08lx in user region\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
+		       " at 0x%08lx in user region\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
-		       "overlaps vmalloc space\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+		       " at 0x%08lx overlaps vmalloc space\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -684,13 +694,13 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	addr = md->virtual & PAGE_MASK;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), addr);
+		       (long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
@@ -699,7 +709,7 @@ static void __init create_mapping(struct map_desc *md)
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_section(pgd, addr, next, phys, type);
+		alloc_init_pud(pgd, addr, next, phys, type);
 
 		phys += next - addr;
 		addr = next;
@@ -747,20 +757,18 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
 
-static void __init sanity_check_meminfo(void)
+void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
-
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) > vmalloc_min ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
 			highmem = 1;
 
@@ -796,9 +804,10 @@ static void __init sanity_check_meminfo(void)
 			 */
 			if (__va(bank->start) >= vmalloc_min ||
 			    __va(bank->start) < (void *)PAGE_OFFSET) {
-				printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+				printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
 				       "(vmalloc region overlap).\n",
-				       bank->start, bank->start + bank->size - 1);
+				       (unsigned long long)bank->start,
+				       (unsigned long long)bank->start + bank->size - 1);
 				continue;
 			}
 
@@ -809,13 +818,17 @@ static void __init sanity_check_meminfo(void)
 		if (__va(bank->start + bank->size) > vmalloc_min ||
 		    __va(bank->start + bank->size) < __va(bank->start)) {
 			unsigned long newsize = vmalloc_min - __va(bank->start);
-			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
-			       "to -%.8lx (vmalloc region overlap).\n",
-			       bank->start, bank->start + bank->size - 1,
-			       bank->start + newsize - 1);
+			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
+			       "to -%.8llx (vmalloc region overlap).\n",
+			       (unsigned long long)bank->start,
+			       (unsigned long long)bank->start + bank->size - 1,
+			       (unsigned long long)bank->start + newsize - 1);
 			bank->size = newsize;
 		}
 #endif
+		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+			lowmem_limit = bank->start + bank->size;
+
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
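
The lines added at the bottom of the loop keep lowmem_limit as a running maximum over the ends of the non-highmem banks; sanity_check_meminfo() then feeds it to memblock_set_current_limit(), and map_lowmem() later clamps against it. A minimal sketch of the accumulation over hypothetical bank values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct membank { uint64_t start, size; bool highmem; };

int main(void)
{
	/* Hypothetical banks; only non-highmem ones raise the limit. */
	struct membank banks[] = {
		{ 0x80000000, 0x10000000, false },
		{ 0x90000000, 0x10000000, false },
		{ 0xa0000000, 0x20000000, true },
	};
	uint64_t lowmem_limit = 0;

	for (int i = 0; i < 3; i++)
		if (!banks[i].highmem &&
		    banks[i].start + banks[i].size > lowmem_limit)
			lowmem_limit = banks[i].start + banks[i].size;

	printf("lowmem_limit = %#llx\n",
	       (unsigned long long)lowmem_limit);	/* 0xa0000000 */
	return 0;
}
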
@@ -829,18 +842,6 @@ static void __init sanity_check_meminfo(void)
 	 * rather difficult.
 	 */
 	reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-	} else if (tlb_ops_need_broadcast()) {
-		/*
-		 * kmap_high needs to occasionally flush TLB entries,
-		 * however, if the TLB entries need to be broadcast
-		 * we may deadlock:
-		 *   kmap_high(irqs off)->flush_all_zero_pkmaps->
-		 *   flush_tlb_kernel_range->smp_call_function_many
-		 *   (must not be called with irqs off)
-		 */
-		reason = "without hardware TLB ops broadcasting";
-#endif
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
@@ -851,11 +852,13 @@ static void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
+	memblock_set_current_limit(lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
 {
 	unsigned long addr;
+	phys_addr_t end;
 
 	/*
 	 * Clear out all the mappings below the kernel image.
@@ -871,10 +874,17 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
+	 * Find the end of the first block of lowmem.
+	 */
+	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+	if (end >= lowmem_limit)
+		end = lowmem_limit;
+
+	/*
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+	for (addr = __phys_to_virt(end);
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -910,12 +920,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -955,7 +964,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000). If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
@@ -991,37 +1000,28 @@ static void __init kmap_init(void)
 #endif
 }
 
-static inline void map_memory_bank(struct membank *bank)
-{
-	struct map_desc map;
-
-	map.pfn = bank_pfn_start(bank);
-	map.virtual = __phys_to_virt(bank_phys_start(bank));
-	map.length = bank_phys_size(bank);
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
-}
-
 static void __init map_lowmem(void)
 {
-	struct meminfo *mi = &meminfo;
-	int i;
+	struct memblock_region *reg;
 
 	/* Map all the lowmem memory banks. */
-	for (i = 0; i < mi->nr_banks; i++) {
-		struct membank *bank = &mi->bank[i];
-
-		if (!bank->highmem)
-			map_memory_bank(bank);
-	}
-}
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > lowmem_limit)
+			end = lowmem_limit;
+		if (start >= end)
+			break;
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
+
+		create_mapping(&map);
+	}
 }
 
 /*
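
map_lowmem() now iterates memblock regions instead of meminfo banks, clamping each region to lowmem_limit before handing it to create_mapping(); because memblock keeps its regions sorted, the meminfo_cmp()/sort() pair also disappears (see the paging_init() hunk below). A sketch of the clamp-and-map loop over a plain array standing in for memblock (for_each_memblock itself is a kernel macro):

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

int main(void)
{
	/* Hypothetical sorted regions, like memblock.memory. */
	struct region regions[] = {
		{ 0x80000000, 0x20000000 },
		{ 0xc0000000, 0x20000000 },
	};
	uint64_t lowmem_limit = 0xb0000000;	/* assumed ceiling */

	for (int i = 0; i < 2; i++) {
		uint64_t start = regions[i].base;
		uint64_t end = start + regions[i].size;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)	/* fully above lowmem: stop */
			break;

		/* here the kernel fills a map_desc and calls create_mapping() */
		printf("map %#llx..%#llx\n",
		       (unsigned long long)start, (unsigned long long)end);
	}
	return 0;
}
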
@@ -1032,10 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+	memblock_set_current_limit(lowmem_limit);
 
 	build_mem_type_table();
-	sanity_check_meminfo();
 	prepare_page_table();
 	map_lowmem();
 	devicemaps_init(mdesc);
@@ -1051,38 +1050,3 @@ void __init paging_init(struct machine_desc *mdesc)
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
-
-/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages. This will then ensure that we have predictable
- * results when turning the mmu off
- */
-void setup_mm_for_reboot(char mode)
-{
-	unsigned long base_pmdval;
-	pgd_t *pgd;
-	int i;
-
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	pgd = current->active_mm->pgd;
-
-	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		base_pmdval |= PMD_BIT4;
-
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
-		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
-		pmd_t *pmd;
-
-		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
-		pmd[0] = __pmd(pmdval);
-		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-		flush_pmd_entry(pmd);
-	}
-
-	local_flush_tlb_all();
-}
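
The removed setup_mm_for_reboot() built its 1:1 mapping arithmetically: for pgd slot i, the section descriptor base is just the virtual address (i << PGDIR_SHIFT), so virtual and physical addresses coincide once the MMU is turned off. Each 2 MiB pgd slot gets two 1 MiB sections, hence the pmd[0]/pmd[1] pair. A sketch of that descriptor arithmetic with illustrative flag values (the real bit layout lives in the ARM pgtable headers):

#include <stdio.h>

#define PGDIR_SHIFT	21	/* each pgd entry covers 2 MiB          */
#define PMD_TYPE_SECT	0x2	/* illustrative section descriptor bits */
#define PMD_SECT_AP	0xc00	/* illustrative access-permission bits  */

int main(void)
{
	unsigned long base_pmdval = PMD_SECT_AP | PMD_TYPE_SECT;

	/* For each pgd slot the section base equals the virtual address, */
	/* giving the identity (1:1) mapping used across the reboot path. */
	for (int i = 0; i < 4; i++) {
		unsigned long pmdval = ((unsigned long)i << PGDIR_SHIFT) | base_pmdval;

		printf("pmd[0]=%#010lx pmd[1]=%#010lx\n",
		       pmdval, pmdval + (1UL << (PGDIR_SHIFT - 1)));
	}
	return 0;
}
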