Diffstat (limited to 'arch/ppc64/mm/init.c')
-rw-r--r--	arch/ppc64/mm/init.c	96
1 file changed, 54 insertions(+), 42 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..c02dc9809ca5 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -42,7 +42,6 @@
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
-#include <asm/abs_addr.h>
 #include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/rtas.h>
@@ -66,6 +65,14 @@
 #include <asm/vdso.h>
 #include <asm/imalloc.h>
 
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
 static unsigned long phbs_io_bot = PHBS_IO_BASE;
@@ -159,7 +166,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
 	if (!ptep)
 		return -ENOMEM;
-	pa = abs_to_phys(pa);
 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 					      __pgprot(flags)));
 	spin_unlock(&init_mm.page_table_lock);
@@ -226,7 +232,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
-	 * IMALLOC_END (0xE000001fffffffff)
+	 * IMALLOC_END
	 *
	 */
	pa = addr & PAGE_MASK;
@@ -417,12 +423,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	int index;
 	int err;
 
-#ifdef CONFIG_HUGETLB_PAGE
-	/* We leave htlb_segs as it was, but for a fork, we need to
-	 * clear the huge_pgdir. */
-	mm->context.huge_pgdir = NULL;
-#endif
-
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
 		return -ENOMEM;
@@ -453,8 +453,6 @@ void destroy_context(struct mm_struct *mm)
 	spin_unlock(&mmu_context_lock);
 
 	mm->context.id = NO_CONTEXT;
-
-	hugetlb_mm_free_pgd(mm);
 }
 
 /*
@@ -484,9 +482,9 @@ void __init mm_init_ppc64(void)
 	for (i = 1; i < lmb.memory.cnt; i++) {
 		unsigned long base, prevbase, prevsize;
 
-		prevbase = lmb.memory.region[i-1].physbase;
+		prevbase = lmb.memory.region[i-1].base;
 		prevsize = lmb.memory.region[i-1].size;
-		base = lmb.memory.region[i].physbase;
+		base = lmb.memory.region[i].base;
 		if (base > (prevbase + prevsize)) {
 			io_hole_start = prevbase + prevsize;
 			io_hole_size = base - (prevbase + prevsize);
@@ -513,11 +511,8 @@ int page_is_ram(unsigned long pfn)
 	for (i=0; i < lmb.memory.cnt; i++) {
 		unsigned long base;
 
-#ifdef CONFIG_MSCHUNKS
-		base = lmb.memory.region[i].physbase;
-#else
 		base = lmb.memory.region[i].base;
-#endif
+
 		if ((paddr >= base) &&
 		    (paddr < (base + lmb.memory.region[i].size))) {
 			return 1;
@@ -547,7 +542,7 @@ void __init do_init_bootmem(void)
 	 */
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-	start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));
+	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	BUG_ON(!start);
 
 	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
@@ -558,25 +553,25 @@ void __init do_init_bootmem(void)
 	 * present.
 	 */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long physbase, size;
+		unsigned long base, size;
 		unsigned long start_pfn, end_pfn;
 
-		physbase = lmb.memory.region[i].physbase;
+		base = lmb.memory.region[i].base;
 		size = lmb.memory.region[i].size;
 
-		start_pfn = physbase >> PAGE_SHIFT;
+		start_pfn = base >> PAGE_SHIFT;
 		end_pfn = start_pfn + (size >> PAGE_SHIFT);
 		memory_present(0, start_pfn, end_pfn);
 
-		free_bootmem(physbase, size);
+		free_bootmem(base, size);
 	}
 
 	/* reserve the sections we're already using */
 	for (i=0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].physbase;
+		unsigned long base = lmb.reserved.region[i].base;
 		unsigned long size = lmb.reserved.region[i].size;
 
-		reserve_bootmem(physbase, size);
+		reserve_bootmem(base, size);
 	}
 }
 
@@ -615,10 +610,10 @@ static int __init setup_kcore(void)
 	int i;
 
 	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long physbase, size;
+		unsigned long base, size;
 		struct kcore_list *kcore_mem;
 
-		physbase = lmb.memory.region[i].physbase;
+		base = lmb.memory.region[i].base;
 		size = lmb.memory.region[i].size;
 
 		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
@@ -626,7 +621,7 @@ static int __init setup_kcore(void)
 		if (!kcore_mem)
 			panic("mem_init: kmalloc failed\n");
 
-		kclist_add(kcore_mem, __va(physbase), size);
+		kclist_add(kcore_mem, __va(base), size);
 	}
 
 	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
@@ -686,9 +681,6 @@ void __init mem_init(void)
 
 	mem_init_done = 1;
 
-#ifdef CONFIG_PPC_ISERIES
-	iommu_vio_init();
-#endif
 	/* Initialize the vDSO */
 	vdso_init();
 }
@@ -833,23 +825,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
 	return virt_addr;
 }
 
-kmem_cache_t *zero_cache;
-
-static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 {
-	memset(pte, 0, PAGE_SIZE);
+	memset(addr, 0, kmem_cache_size(cache));
 }
 
+static const int pgtable_cache_size[2] = {
+	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+	"pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
 void pgtable_cache_init(void)
 {
-	zero_cache = kmem_cache_create("zero",
-				       PAGE_SIZE,
-				       0,
-				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
-				       zero_ctor,
-				       NULL);
-	if (!zero_cache)
-		panic("pgtable_cache_init(): could not create zero_cache!\n");
+	int i;
+
+	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+		int size = pgtable_cache_size[i];
+		const char *name = pgtable_cache_name[i];
+
+		pgtable_cache[i] = kmem_cache_create(name,
+						     size, size,
+						     SLAB_HWCACHE_ALIGN
+						     | SLAB_MUST_HWCACHE_ALIGN,
+						     zero_ctor,
+						     NULL);
+		if (! pgtable_cache[i])
+			panic("pgtable_cache_init(): could not create %s!\n",
+			      name);
+	}
 }
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,