author     David Gibson <david@gibson.dropbear.id.au>    2005-08-05 05:39:06 -0400
committer  Paul Mackerras <paulus@samba.org>             2005-08-28 20:53:31 -0400
commit     e28f7faf05159f1cfd564596f5e6178edba6bd49
tree       45534d2c33bff8b64e3fd155fba55146cb7518e6 /arch/ppc64/mm/init.c
parent     decd300b30e499fe6be1bbfc5650fc971de8c1fa
[PATCH] Four level pagetables for ppc64
Implement 4-level pagetables for ppc64
This patch implements full four-level page tables for ppc64, thereby
extending the usable user address range to 44 bits (16T).
The patch uses a full page for the tables at the bottom and top level,
and a quarter page for the intermediate levels. It uses full 64-bit
pointers at every level, thus also increasing the addressable range of
physical memory. This patch also tweaks the VSID allocation to allow a
matching range for user addresses (this halves the number of available
contexts) and adds some #if and BUILD_BUG_ON sanity checks.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
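For concreteness, the 44-bit figure follows from the per-level index widths the message describes. Below is a minimal worked sketch, assuming 4K pages and 8-byte table entries, with full-page tables at the PTE and PGD levels (9 index bits each) and quarter-page tables at the PMD and PUD levels (7 index bits each); these index sizes are consistent with the description but are not shown in this diff:

#include <stdio.h>

/* Worked arithmetic behind the commit message's "44 bits (16T)" claim.
 * Assumed values (not part of this diff): 4K pages, 8-byte entries,
 * full-page PTE/PGD tables, quarter-page PMD/PUD tables. */
#define PAGE_SHIFT      12      /* 4K pages */
#define PTE_INDEX_BITS  9       /* 4096 bytes / 8 = 512 entries */
#define PMD_INDEX_BITS  7       /* 1024 bytes / 8 = 128 entries */
#define PUD_INDEX_BITS  7       /* quarter page, like the PMD level */
#define PGD_INDEX_BITS  9       /* full page, like the PTE level */

int main(void)
{
	int bits = PAGE_SHIFT + PTE_INDEX_BITS + PMD_INDEX_BITS
		   + PUD_INDEX_BITS + PGD_INDEX_BITS;

	printf("usable address bits: %d\n", bits);        /* prints 44 */
	printf("range: %lluT\n", (1ULL << bits) >> 40);   /* prints 16 */
	return 0;
}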
Diffstat (limited to 'arch/ppc64/mm/init.c')
 arch/ppc64/mm/init.c | 62
 1 file changed, 41 insertions(+), 21 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..87f256df8de5 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -66,6 +66,14 @@
 #include <asm/vdso.h>
 #include <asm/imalloc.h>
 
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
 static unsigned long phbs_io_bot = PHBS_IO_BASE;
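The two #warning guards above are the soft flavor of the compile-time sanity checks the commit message mentions; the hard flavor, used later in this file, is BUILD_BUG_ON(), which turns an inconsistency into a build failure. A minimal userspace stand-in for that macro, shown only to illustrate the mechanism; the macro name and range values here are invented for the demo:

#include <stdio.h>

/* Stand-alone analogue of the kernel's BUILD_BUG_ON(): when cond is
 * true the array size becomes negative and compilation fails, so the
 * mistake is caught at build time rather than at run time. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Illustrative values only. */
#define PGTABLE_RANGE   (1ULL << 44)
#define USER_VSID_RANGE (1ULL << 44)

int main(void)
{
	/* Compiles only while the pagetable range fits the VSID range. */
	BUILD_BUG_ON_SKETCH(PGTABLE_RANGE > USER_VSID_RANGE);
	puts("ranges consistent");
	return 0;
}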
@@ -226,7 +234,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	 * Before that, we map using addresses going
 	 * up from ioremap_bot.  imalloc will use
 	 * the addresses from ioremap_bot through
-	 * IMALLOC_END (0xE000001fffffffff)
+	 * IMALLOC_END
 	 *
 	 */
 	pa = addr & PAGE_MASK;
@@ -417,12 +425,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	int index;
 	int err;
 
-#ifdef CONFIG_HUGETLB_PAGE
-	/* We leave htlb_segs as it was, but for a fork, we need to
-	 * clear the huge_pgdir. */
-	mm->context.huge_pgdir = NULL;
-#endif
-
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
 		return -ENOMEM;
@@ -453,8 +455,6 @@ void destroy_context(struct mm_struct *mm)
 	spin_unlock(&mmu_context_lock);
 
 	mm->context.id = NO_CONTEXT;
-
-	hugetlb_mm_free_pgd(mm);
 }
 
 /*
@@ -833,23 +833,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
 	return virt_addr;
 }
 
-kmem_cache_t *zero_cache;
-
-static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 {
-	memset(pte, 0, PAGE_SIZE);
+	memset(addr, 0, kmem_cache_size(cache));
 }
 
+static const int pgtable_cache_size[2] = {
+	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+	"pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
 void pgtable_cache_init(void)
 {
-	zero_cache = kmem_cache_create("zero",
-				       PAGE_SIZE,
-				       0,
-				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
-				       zero_ctor,
-				       NULL);
-	if (!zero_cache)
-		panic("pgtable_cache_init(): could not create zero_cache!\n");
+	int i;
+
+	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+		int size = pgtable_cache_size[i];
+		const char *name = pgtable_cache_name[i];
+
+		pgtable_cache[i] = kmem_cache_create(name,
+						     size, size,
+						     SLAB_HWCACHE_ALIGN
+						     | SLAB_MUST_HWCACHE_ALIGN,
+						     zero_ctor,
+						     NULL);
+		if (! pgtable_cache[i])
+			panic("pgtable_cache_init(): could not create %s!\n",
+			      name);
+	}
 }
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
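The last hunk replaces the single page-sized zero_cache with one slab per distinct table size (the PGD shares a cache with the full-page PTE tables, the PUD with the quarter-page PMD tables, hence the two entries), each zeroed by zero_ctor so a table comes out of the allocator already cleared. A userspace sketch of that design choice; the toy cache structure and names are invented for illustration, and the constructor here runs on every allocation, whereas a real slab runs it only when an object first enters the cache:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy object cache mirroring the zero_ctor pattern above: the
 * constructor clears the whole object, so callers never see stale
 * entries in a freshly allocated table. */
struct toy_cache {
	size_t size;                          /* object size == alignment, as in the patch */
	void (*ctor)(void *obj, size_t size);
};

static void zero_ctor(void *obj, size_t size)
{
	memset(obj, 0, size);                 /* cf. memset(addr, 0, kmem_cache_size(cache)) */
}

static void *toy_cache_alloc(struct toy_cache *c)
{
	void *obj = aligned_alloc(c->size, c->size);  /* C11 */
	if (obj && c->ctor)
		c->ctor(obj, c->size);        /* simplification: runs on every alloc */
	return obj;
}

int main(void)
{
	/* Quarter-page cache, matching the intermediate-level table size
	 * described in the commit message (assumed 1024 bytes). */
	struct toy_cache pud_pmd_cache = { 1024, zero_ctor };
	unsigned long *pmd = toy_cache_alloc(&pud_pmd_cache);

	if (pmd) {
		printf("entry 0 after alloc: %lu\n", pmd[0]);  /* 0: pre-zeroed */
		free(pmd);
	}
	return 0;
}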