author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2005-11-06 19:06:55 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>                2005-11-06 19:56:47 -0500
commit     3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree       f67c381e8f57959aa4a94bda4c68e24253cd8171 /arch/powerpc/mm/mem.c
parent     f912696ab330bf539231d1f8032320f2a08b850f (diff)
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel
base page size to 64K. The resulting kernel still boots on any
hardware. On current machines that support only 4K pages, the kernel
will maintain 16 "subpages" for each 64K page transparently.
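
For scale, the "16 subpages" figure is simply the ratio of the two page sizes (64K / 4K = 16). A minimal user-space sketch of that arithmetic follows; the macro and function names are invented for the illustration and are not taken from the patch:

#include <stdio.h>

/* Illustrative only: with a 64K kernel base page and 4K hardware pages,
 * each base page covers 64K / 4K = 16 hardware subpages.  The subpage
 * index of an effective address is bits 12..15 of its offset within
 * the 64K base page.
 */
#define HW_PAGE_SHIFT           12      /* 4K hardware page */
#define BASE_PAGE_SHIFT         16      /* 64K kernel base page */
#define SUBPAGES_PER_PAGE       (1UL << (BASE_PAGE_SHIFT - HW_PAGE_SHIFT))

static unsigned long subpage_index(unsigned long ea)
{
        return (ea >> HW_PAGE_SHIFT) & (SUBPAGES_PER_PAGE - 1);
}

int main(void)
{
        printf("subpages per base page: %lu\n", SUBPAGES_PER_PAGE);     /* 16 */
        printf("subpage of 0xf123: %lu\n", subpage_index(0xf123UL));    /* 15 */
        return 0;
}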
Note that while real 64K-capable HW has been tested, the current patch
does not enable it yet, as such hardware has not been released, and I'm
still verifying with the firmware architects the proper way to get the
information from the newer hypervisors.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--    arch/powerpc/mm/mem.c    56
1 files changed, 21 insertions, 35 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 117b00012e14..7faa46b71f21 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,6 +61,9 @@ int init_bootmem_done;
 int mem_init_done;
 unsigned long memory_limit;
 
+extern void hash_preload(struct mm_struct *mm, unsigned long ea,
+                         unsigned long access, unsigned long trap);
+
 /*
  * This is called by /dev/mem to know if a given address has to
  * be mapped non-cacheable or not
@@ -493,18 +496,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                       pte_t pte)
 {
-        /* handle i-cache coherency */
-        unsigned long pfn = pte_pfn(pte);
-#ifdef CONFIG_PPC32
-        pmd_t *pmd;
-#else
-        unsigned long vsid;
-        void *pgdir;
-        pte_t *ptep;
-        int local = 0;
-        cpumask_t tmp;
-        unsigned long flags;
+#ifdef CONFIG_PPC_STD_MMU
+        unsigned long access = 0, trap;
 #endif
+        unsigned long pfn = pte_pfn(pte);
 
         /* handle i-cache coherency */
         if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
@@ -535,30 +530,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
         /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
         if (!pte_young(pte) || address >= TASK_SIZE)
                 return;
-#ifdef CONFIG_PPC32
-        if (Hash == 0)
-                return;
-        pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
-        if (!pmd_none(*pmd))
-                add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
-#else
-        pgdir = vma->vm_mm->pgd;
-        if (pgdir == NULL)
-                return;
 
-        ptep = find_linux_pte(pgdir, address);
-        if (!ptep)
+        /* We try to figure out if we are coming from an instruction
+         * access fault and pass that down to __hash_page so we avoid
+         * double-faulting on execution of fresh text. We have to test
+         * for regs NULL since init will get here first thing at boot
+         *
+         * We also avoid filling the hash if not coming from a fault
+         */
+        if (current->thread.regs == NULL)
                 return;
-
-        vsid = get_vsid(vma->vm_mm->context.id, address);
-
-        local_irq_save(flags);
-        tmp = cpumask_of_cpu(smp_processor_id());
-        if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
-                local = 1;
-
-        __hash_page(address, 0, vsid, ptep, 0x300, local);
-        local_irq_restore(flags);
-#endif
-#endif
+        trap = TRAP(current->thread.regs);
+        if (trap == 0x400)
+                access |= _PAGE_EXEC;
+        else if (trap != 0x300)
+                return;
+        hash_preload(vma->vm_mm, address, access, trap);
+#endif /* CONFIG_PPC_STD_MMU */
 }
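
A note on the trap numbers used above: on PowerPC, exception vector 0x300 is the data storage interrupt (DSI) and 0x400 the instruction storage interrupt (ISI), so the new code only preloads the hash when update_mmu_cache() is reached from an actual page fault, and requests an executable mapping for instruction faults. A self-contained sketch restating that classification (fault_access_flags() is not a real kernel function, and the _PAGE_EXEC value here is a placeholder, not the kernel's definition):

#include <stdio.h>

#define _PAGE_EXEC      0x004UL         /* placeholder bit for the example */

/* Illustrative restatement of the classification done in the patch. */
static int fault_access_flags(unsigned long trap, unsigned long *access)
{
        *access = 0;
        if (trap == 0x400)              /* ISI: instruction fetch fault */
                *access |= _PAGE_EXEC;  /* ask for an executable mapping */
        else if (trap != 0x300)         /* not a DSI: not a page fault */
                return -1;              /* caller would skip hash_preload() */
        return 0;
}

int main(void)
{
        unsigned long access;

        printf("0x300 -> %d\n", fault_access_flags(0x300, &access));   /* 0  */
        printf("0x400 -> %d\n", fault_access_flags(0x400, &access));   /* 0  */
        printf("0xc00 -> %d\n", fault_access_flags(0xc00, &access));   /* -1 */
        return 0;
}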