path: root/arch/sparc/mm/init_64.c
author		David S. Miller <davem@davemloft.net>	2013-09-26 16:45:15 -0400
committer	David S. Miller <davem@davemloft.net>	2013-11-13 15:33:08 -0500
commit		a7b9403f0e6d5f99139dca18be885819c8d380a1 (patch)
tree		3e88ddc9250a1408eefc48a20a8fb445e19fcaa3 /arch/sparc/mm/init_64.c
parent		2b77933c28f5044629bb19e8045aae65b72b939d (diff)
sparc64: Encode huge PMDs using PTE encoding.
Now that we have 64 bits for PMDs we can stop using special encodings for the huge PMD values, and just put real PTEs in there.

We allocate a _PAGE_PMD_HUGE bit to distinguish between plain PMDs and huge ones. It is the same for both 4U and 4V PTE layouts.

We also use _PAGE_SPECIAL to indicate the splitting state, since a huge PMD cannot also be special.

All of the PMD --> PTE translation code disappears, and most of the huge PMD bit modifications and tests just degenerate into the PTE operations. In particular USER_PGTABLE_CHECK_PMD_HUGE becomes trivial.

As a side effect, normal PMDs don't shift the physical address around. This also speeds up the page table walks in the TLB miss paths, since they no longer have to do the shifts.

Another non-trivial aspect is that pte_modify() has to be changed to preserve the _PAGE_PMD_HUGE bit as well as the page size field of the pte.

Signed-off-by: David S. Miller <davem@davemloft.net>
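To make the encoding concrete, here is a minimal, self-contained C sketch of the two ideas the message describes: with a real PTE stored in a huge PMD, the huge/splitting tests become plain bit tests, and a pte_modify()-style helper must carry the huge bit and the page-size field across a protection change. This is an illustration only, not the kernel sources; the bit positions, PAGE_SZ_MASK, PROT_MASK, and pte_modify_sketch are placeholders invented for the example (the real definitions live in arch/sparc/include/asm/pgtable_64.h).

```c
/* Illustrative sketch only -- not the sparc64 kernel code. */
#include <stdio.h>

typedef unsigned long pte_t;
typedef unsigned long pmd_t;		/* a huge PMD now holds a real PTE value */

#define _PAGE_PMD_HUGE	(1UL << 8)	/* placeholder: marks a huge PMD        */
#define _PAGE_SPECIAL	(1UL << 9)	/* placeholder: splitting state marker  */
#define PAGE_SZ_MASK	(7UL << 61)	/* placeholder: page size field of pte  */
#define PROT_MASK	0x00ffUL	/* placeholder: protection bits         */

static int pmd_trans_huge(pmd_t pmd)
{
	/* A huge PMD is just a PTE with the huge bit set. */
	return (pmd & _PAGE_PMD_HUGE) != 0;
}

static int pmd_trans_splitting(pmd_t pmd)
{
	/* _PAGE_SPECIAL doubles as the splitting marker for huge PMDs. */
	return pmd_trans_huge(pmd) && (pmd & _PAGE_SPECIAL);
}

static pte_t pte_modify_sketch(pte_t pte, unsigned long newprot)
{
	/* Preserve the huge bit and the page size field, replace the rest. */
	unsigned long keep = pte & (_PAGE_PMD_HUGE | PAGE_SZ_MASK);

	return keep | (newprot & PROT_MASK);
}

int main(void)
{
	pmd_t pmd = _PAGE_PMD_HUGE | _PAGE_SPECIAL | 0x40UL;

	printf("huge=%d splitting=%d\n",
	       pmd_trans_huge(pmd), pmd_trans_splitting(pmd));
	printf("after modify: %#lx\n",
	       (unsigned long)pte_modify_sketch(pmd, 0x03UL));
	return 0;
}
```

The payoff the commit describes follows directly from this shape: once the value stored in a huge PMD is a real PTE, the TLB miss path can use it as-is instead of translating PMD-private bits, which is exactly the work the deleted pmd_pgprot()/pmd_set_protbits() code in the diff below used to do.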
Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--	arch/sparc/mm/init_64.c	102
1 file changed, 1 insertion(+), 101 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 828784419e25..bd6430ded69f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2602,121 +2602,21 @@ void pgtable_free(void *table, bool is_page)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
-{
-	if (pgprot_val(pgprot) & _PAGE_VALID)
-		pmd_val(pmd) |= PMD_HUGE_PRESENT;
-	if (tlb_type == hypervisor) {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	} else {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	}
-
-	return pmd;
-}
-
-pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
-	pmd_val(pmd) |= PMD_ISHUGE;
-	pmd = pmd_set_protbits(pmd, pgprot, false);
-	return pmd;
-}
-
-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
-			  PMD_HUGE_WRITE |
-			  PMD_HUGE_EXEC);
-	pmd = pmd_set_protbits(pmd, newprot, true);
-	return pmd;
-}
-
-pgprot_t pmd_pgprot(pmd_t entry)
-{
-	unsigned long pte = 0;
-
-	if (pmd_val(entry) & PMD_HUGE_PRESENT)
-		pte |= _PAGE_VALID;
-
-	if (tlb_type == hypervisor) {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4V;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4V;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4V;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4V;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4V;
-		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
-	} else {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4U;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4U;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4U;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4U;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4U;
-		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
-	}
-
-	return __pgprot(pte);
-}
-
 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 			  pmd_t *pmd)
 {
 	unsigned long pte, flags;
 	struct mm_struct *mm;
 	pmd_t entry = *pmd;
-	pgprot_t prot;
 
 	if (!pmd_large(entry) || !pmd_young(entry))
 		return;
 
-	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
-	pte <<= PMD_PADDR_SHIFT;
-	pte |= _PAGE_VALID;
+	pte = pmd_val(entry);
 
 	/* We are fabricating 8MB pages using 4MB real hw pages. */
 	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
 
-	prot = pmd_pgprot(entry);
-
-	if (tlb_type == hypervisor)
-		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
-	else
-		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
-
-	pte |= pgprot_val(prot);
-
 	mm = vma->vm_mm;
 
 	spin_lock_irqsave(&mm->context.lock, flags);