about summary refs log tree commit diff stats
path: root/arch/sparc/mm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/fault_64.c     14
-rw-r--r--  arch/sparc/mm/gup.c           9
-rw-r--r--  arch/sparc/mm/hugetlbpage.c   2
-rw-r--r--  arch/sparc/mm/init_64.c     293
-rw-r--r--  arch/sparc/mm/init_64.h       4
-rw-r--r--  arch/sparc/mm/srmmu.c         5
-rw-r--r--  arch/sparc/mm/tlb.c          25
-rw-r--r--  arch/sparc/mm/tsb.c          13
-rw-r--r--  arch/sparc/mm/ultra.S        12
9 files changed, 163 insertions, 214 deletions
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2ebec263d685..69bb818fdd79 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -21,6 +21,7 @@
21#include <linux/kprobes.h> 21#include <linux/kprobes.h>
22#include <linux/kdebug.h> 22#include <linux/kdebug.h>
23#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/context_tracking.h>
24 25
25#include <asm/page.h> 26#include <asm/page.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
@@ -272,6 +273,7 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
272 273
273asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) 274asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
274{ 275{
276 enum ctx_state prev_state = exception_enter();
275 struct mm_struct *mm = current->mm; 277 struct mm_struct *mm = current->mm;
276 struct vm_area_struct *vma; 278 struct vm_area_struct *vma;
277 unsigned int insn = 0; 279 unsigned int insn = 0;
@@ -282,7 +284,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
282 fault_code = get_thread_fault_code(); 284 fault_code = get_thread_fault_code();
283 285
284 if (notify_page_fault(regs)) 286 if (notify_page_fault(regs))
285 return; 287 goto exit_exception;
286 288
287 si_code = SEGV_MAPERR; 289 si_code = SEGV_MAPERR;
288 address = current_thread_info()->fault_address; 290 address = current_thread_info()->fault_address;
@@ -313,7 +315,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
313 /* Valid, no problems... */ 315 /* Valid, no problems... */
314 } else { 316 } else {
315 bad_kernel_pc(regs, address); 317 bad_kernel_pc(regs, address);
316 return; 318 goto exit_exception;
317 } 319 }
318 } else 320 } else
319 flags |= FAULT_FLAG_USER; 321 flags |= FAULT_FLAG_USER;
@@ -430,7 +432,7 @@ good_area:
430 fault = handle_mm_fault(mm, vma, address, flags); 432 fault = handle_mm_fault(mm, vma, address, flags);
431 433
432 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 434 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
433 return; 435 goto exit_exception;
434 436
435 if (unlikely(fault & VM_FAULT_ERROR)) { 437 if (unlikely(fault & VM_FAULT_ERROR)) {
436 if (fault & VM_FAULT_OOM) 438 if (fault & VM_FAULT_OOM)
@@ -482,6 +484,8 @@ good_area:
482 484
483 } 485 }
484#endif 486#endif
487exit_exception:
488 exception_exit(prev_state);
485 return; 489 return;
486 490
487 /* 491 /*
@@ -494,7 +498,7 @@ bad_area:
494 498
495handle_kernel_fault: 499handle_kernel_fault:
496 do_kernel_fault(regs, si_code, fault_code, insn, address); 500 do_kernel_fault(regs, si_code, fault_code, insn, address);
497 return; 501 goto exit_exception;
498 502
499/* 503/*
500 * We ran out of memory, or some other thing happened to us that made 504 * We ran out of memory, or some other thing happened to us that made
@@ -505,7 +509,7 @@ out_of_memory:
505 up_read(&mm->mmap_sem); 509 up_read(&mm->mmap_sem);
506 if (!(regs->tstate & TSTATE_PRIV)) { 510 if (!(regs->tstate & TSTATE_PRIV)) {
507 pagefault_out_of_memory(); 511 pagefault_out_of_memory();
508 return; 512 goto exit_exception;
509 } 513 }
510 goto handle_kernel_fault; 514 goto handle_kernel_fault;
511 515
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 01ee23dd724d..c4d3da68b800 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
71 int *nr) 71 int *nr)
72{ 72{
73 struct page *head, *page, *tail; 73 struct page *head, *page, *tail;
74 u32 mask;
75 int refs; 74 int refs;
76 75
77 mask = PMD_HUGE_PRESENT; 76 if (!pmd_large(pmd))
78 if (write) 77 return 0;
79 mask |= PMD_HUGE_WRITE; 78
80 if ((pmd_val(pmd) & mask) != mask) 79 if (write && !pmd_write(pmd))
81 return 0; 80 return 0;
82 81
83 refs = 0; 82 refs = 0;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 96399646570a..30963178d7e9 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -21,8 +21,6 @@
21/* Slightly simplified from the non-hugepage variant because by 21/* Slightly simplified from the non-hugepage variant because by
22 * definition we don't have to worry about any page coloring stuff 22 * definition we don't have to worry about any page coloring stuff
23 */ 23 */
24#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
25#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
26 24
27static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, 25static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
28 unsigned long addr, 26 unsigned long addr,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed82edad1a39..6b643790e4fe 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -354,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
354 354
355#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 355#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
356 if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) 356 if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
357 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, 357 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
358 address, pte_val(pte)); 358 address, pte_val(pte));
359 else 359 else
360#endif 360#endif
@@ -1557,6 +1557,96 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
1557 return ~0UL; 1557 return ~0UL;
1558} 1558}
1559 1559
1560unsigned long PAGE_OFFSET;
1561EXPORT_SYMBOL(PAGE_OFFSET);
1562
1563static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
1564{
1565 unsigned long final_shift;
1566 unsigned int val = *insn;
1567 unsigned int cnt;
1568
1569 /* We are patching in ilog2(max_supported_phys_address), and
1570 * we are doing so in a manner similar to a relocation addend.
1571 * That is, we are adding the shift value to whatever value
1572 * is in the shift instruction count field already.
1573 */
1574 cnt = (val & 0x3f);
1575 val &= ~0x3f;
1576
1577 /* If we are trying to shift >= 64 bits, clear the destination
1578 * register. This can happen when phys_bits ends up being equal
1579 * to MAX_PHYS_ADDRESS_BITS.
1580 */
1581 final_shift = (cnt + (64 - phys_bits));
1582 if (final_shift >= 64) {
1583 unsigned int rd = (val >> 25) & 0x1f;
1584
1585 val = 0x80100000 | (rd << 25);
1586 } else {
1587 val |= final_shift;
1588 }
1589 *insn = val;
1590
1591 __asm__ __volatile__("flush %0"
1592 : /* no outputs */
1593 : "r" (insn));
1594}
1595
1596static void __init page_offset_shift_patch(unsigned long phys_bits)
1597{
1598 extern unsigned int __page_offset_shift_patch;
1599 extern unsigned int __page_offset_shift_patch_end;
1600 unsigned int *p;
1601
1602 p = &__page_offset_shift_patch;
1603 while (p < &__page_offset_shift_patch_end) {
1604 unsigned int *insn = (unsigned int *)(unsigned long)*p;
1605
1606 page_offset_shift_patch_one(insn, phys_bits);
1607
1608 p++;
1609 }
1610}
1611
1612static void __init setup_page_offset(void)
1613{
1614 unsigned long max_phys_bits = 40;
1615
1616 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1617 max_phys_bits = 42;
1618 } else if (tlb_type == hypervisor) {
1619 switch (sun4v_chip_type) {
1620 case SUN4V_CHIP_NIAGARA1:
1621 case SUN4V_CHIP_NIAGARA2:
1622 max_phys_bits = 39;
1623 break;
1624 case SUN4V_CHIP_NIAGARA3:
1625 max_phys_bits = 43;
1626 break;
1627 case SUN4V_CHIP_NIAGARA4:
1628 case SUN4V_CHIP_NIAGARA5:
1629 case SUN4V_CHIP_SPARC64X:
1630 default:
1631 max_phys_bits = 47;
1632 break;
1633 }
1634 }
1635
1636 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1637 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1638 max_phys_bits);
1639 prom_halt();
1640 }
1641
1642 PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
1643
1644 pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1645 PAGE_OFFSET, max_phys_bits);
1646
1647 page_offset_shift_patch(max_phys_bits);
1648}
1649
1560static void __init tsb_phys_patch(void) 1650static void __init tsb_phys_patch(void)
1561{ 1651{
1562 struct tsb_ldquad_phys_patch_entry *pquad; 1652 struct tsb_ldquad_phys_patch_entry *pquad;
@@ -1722,7 +1812,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1722#ifndef CONFIG_DEBUG_PAGEALLOC 1812#ifndef CONFIG_DEBUG_PAGEALLOC
1723 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { 1813 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1724 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 1814 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1725 0xfffff80000000000UL; 1815 PAGE_OFFSET;
1726 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1816 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1727 _PAGE_P_4V | _PAGE_W_4V); 1817 _PAGE_P_4V | _PAGE_W_4V);
1728 } else { 1818 } else {
@@ -1731,7 +1821,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1731 1821
1732 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { 1822 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1733 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ 1823 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1734 0xfffff80000000000UL; 1824 PAGE_OFFSET;
1735 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1825 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1736 _PAGE_P_4V | _PAGE_W_4V); 1826 _PAGE_P_4V | _PAGE_W_4V);
1737 } else { 1827 } else {
@@ -1740,7 +1830,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1740 1830
1741 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { 1831 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1742 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ 1832 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1743 0xfffff80000000000UL; 1833 PAGE_OFFSET;
1744 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1834 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1745 _PAGE_P_4V | _PAGE_W_4V); 1835 _PAGE_P_4V | _PAGE_W_4V);
1746 } else { 1836 } else {
@@ -1752,7 +1842,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1752/* paging_init() sets up the page tables */ 1842/* paging_init() sets up the page tables */
1753 1843
1754static unsigned long last_valid_pfn; 1844static unsigned long last_valid_pfn;
1755pgd_t swapper_pg_dir[2048]; 1845pgd_t swapper_pg_dir[PTRS_PER_PGD];
1756 1846
1757static void sun4u_pgprot_init(void); 1847static void sun4u_pgprot_init(void);
1758static void sun4v_pgprot_init(void); 1848static void sun4v_pgprot_init(void);
@@ -1763,6 +1853,8 @@ void __init paging_init(void)
1763 unsigned long real_end, i; 1853 unsigned long real_end, i;
1764 int node; 1854 int node;
1765 1855
1856 setup_page_offset();
1857
1766 /* These build time checkes make sure that the dcache_dirty_cpu() 1858 /* These build time checkes make sure that the dcache_dirty_cpu()
1767 * page->flags usage will work. 1859 * page->flags usage will work.
1768 * 1860 *
@@ -2261,10 +2353,10 @@ static void __init sun4u_pgprot_init(void)
2261 __ACCESS_BITS_4U | _PAGE_E_4U); 2353 __ACCESS_BITS_4U | _PAGE_E_4U);
2262 2354
2263#ifdef CONFIG_DEBUG_PAGEALLOC 2355#ifdef CONFIG_DEBUG_PAGEALLOC
2264 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; 2356 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2265#else 2357#else
2266 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 2358 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2267 0xfffff80000000000UL; 2359 PAGE_OFFSET;
2268#endif 2360#endif
2269 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | 2361 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2270 _PAGE_P_4U | _PAGE_W_4U); 2362 _PAGE_P_4U | _PAGE_W_4U);
@@ -2308,10 +2400,10 @@ static void __init sun4v_pgprot_init(void)
2308 _PAGE_CACHE = _PAGE_CACHE_4V; 2400 _PAGE_CACHE = _PAGE_CACHE_4V;
2309 2401
2310#ifdef CONFIG_DEBUG_PAGEALLOC 2402#ifdef CONFIG_DEBUG_PAGEALLOC
2311 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; 2403 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2312#else 2404#else
2313 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 2405 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2314 0xfffff80000000000UL; 2406 PAGE_OFFSET;
2315#endif 2407#endif
2316 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | 2408 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2317 _PAGE_P_4V | _PAGE_W_4V); 2409 _PAGE_P_4V | _PAGE_W_4V);
@@ -2455,53 +2547,13 @@ void __flush_tlb_all(void)
2455 : : "r" (pstate)); 2547 : : "r" (pstate));
2456} 2548}
2457 2549
2458static pte_t *get_from_cache(struct mm_struct *mm)
2459{
2460 struct page *page;
2461 pte_t *ret;
2462
2463 spin_lock(&mm->page_table_lock);
2464 page = mm->context.pgtable_page;
2465 ret = NULL;
2466 if (page) {
2467 void *p = page_address(page);
2468
2469 mm->context.pgtable_page = NULL;
2470
2471 ret = (pte_t *) (p + (PAGE_SIZE / 2));
2472 }
2473 spin_unlock(&mm->page_table_lock);
2474
2475 return ret;
2476}
2477
2478static struct page *__alloc_for_cache(struct mm_struct *mm)
2479{
2480 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2481 __GFP_REPEAT | __GFP_ZERO);
2482
2483 if (page) {
2484 spin_lock(&mm->page_table_lock);
2485 if (!mm->context.pgtable_page) {
2486 atomic_set(&page->_count, 2);
2487 mm->context.pgtable_page = page;
2488 }
2489 spin_unlock(&mm->page_table_lock);
2490 }
2491 return page;
2492}
2493
2494pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 2550pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2495 unsigned long address) 2551 unsigned long address)
2496{ 2552{
2497 struct page *page; 2553 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2498 pte_t *pte; 2554 __GFP_REPEAT | __GFP_ZERO);
2499 2555 pte_t *pte = NULL;
2500 pte = get_from_cache(mm);
2501 if (pte)
2502 return pte;
2503 2556
2504 page = __alloc_for_cache(mm);
2505 if (page) 2557 if (page)
2506 pte = (pte_t *) page_address(page); 2558 pte = (pte_t *) page_address(page);
2507 2559
@@ -2511,36 +2563,30 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2511pgtable_t pte_alloc_one(struct mm_struct *mm, 2563pgtable_t pte_alloc_one(struct mm_struct *mm,
2512 unsigned long address) 2564 unsigned long address)
2513{ 2565{
2514 struct page *page; 2566 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2515 pte_t *pte; 2567 __GFP_REPEAT | __GFP_ZERO);
2516 2568 pte_t *pte = NULL;
2517 pte = get_from_cache(mm);
2518 if (pte)
2519 return pte;
2520 2569
2521 page = __alloc_for_cache(mm); 2570 if (!page)
2522 if (page) { 2571 return NULL;
2523 pgtable_page_ctor(page); 2572 if (!pgtable_page_ctor(page)) {
2524 pte = (pte_t *) page_address(page); 2573 free_hot_cold_page(page, 0);
2574 return NULL;
2525 } 2575 }
2526 2576 return (pte_t *) page_address(page);
2527 return pte;
2528} 2577}
2529 2578
2530void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 2579void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2531{ 2580{
2532 struct page *page = virt_to_page(pte); 2581 free_page((unsigned long)pte);
2533 if (put_page_testzero(page))
2534 free_hot_cold_page(page, 0);
2535} 2582}
2536 2583
2537static void __pte_free(pgtable_t pte) 2584static void __pte_free(pgtable_t pte)
2538{ 2585{
2539 struct page *page = virt_to_page(pte); 2586 struct page *page = virt_to_page(pte);
2540 if (put_page_testzero(page)) { 2587
2541 pgtable_page_dtor(page); 2588 pgtable_page_dtor(page);
2542 free_hot_cold_page(page, 0); 2589 __free_page(page);
2543 }
2544} 2590}
2545 2591
2546void pte_free(struct mm_struct *mm, pgtable_t pte) 2592void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -2557,124 +2603,27 @@ void pgtable_free(void *table, bool is_page)
2557} 2603}
2558 2604
2559#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2605#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2560static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
2561{
2562 if (pgprot_val(pgprot) & _PAGE_VALID)
2563 pmd_val(pmd) |= PMD_HUGE_PRESENT;
2564 if (tlb_type == hypervisor) {
2565 if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
2566 pmd_val(pmd) |= PMD_HUGE_WRITE;
2567 if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
2568 pmd_val(pmd) |= PMD_HUGE_EXEC;
2569
2570 if (!for_modify) {
2571 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
2572 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2573 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
2574 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2575 }
2576 } else {
2577 if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
2578 pmd_val(pmd) |= PMD_HUGE_WRITE;
2579 if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
2580 pmd_val(pmd) |= PMD_HUGE_EXEC;
2581
2582 if (!for_modify) {
2583 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
2584 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2585 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
2586 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2587 }
2588 }
2589
2590 return pmd;
2591}
2592
2593pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
2594{
2595 pmd_t pmd;
2596
2597 pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
2598 pmd_val(pmd) |= PMD_ISHUGE;
2599 pmd = pmd_set_protbits(pmd, pgprot, false);
2600 return pmd;
2601}
2602
2603pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
2604{
2605 pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
2606 PMD_HUGE_WRITE |
2607 PMD_HUGE_EXEC);
2608 pmd = pmd_set_protbits(pmd, newprot, true);
2609 return pmd;
2610}
2611
2612pgprot_t pmd_pgprot(pmd_t entry)
2613{
2614 unsigned long pte = 0;
2615
2616 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2617 pte |= _PAGE_VALID;
2618
2619 if (tlb_type == hypervisor) {
2620 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2621 pte |= _PAGE_PRESENT_4V;
2622 if (pmd_val(entry) & PMD_HUGE_EXEC)
2623 pte |= _PAGE_EXEC_4V;
2624 if (pmd_val(entry) & PMD_HUGE_WRITE)
2625 pte |= _PAGE_W_4V;
2626 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2627 pte |= _PAGE_ACCESSED_4V;
2628 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2629 pte |= _PAGE_MODIFIED_4V;
2630 pte |= _PAGE_CP_4V|_PAGE_CV_4V;
2631 } else {
2632 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2633 pte |= _PAGE_PRESENT_4U;
2634 if (pmd_val(entry) & PMD_HUGE_EXEC)
2635 pte |= _PAGE_EXEC_4U;
2636 if (pmd_val(entry) & PMD_HUGE_WRITE)
2637 pte |= _PAGE_W_4U;
2638 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2639 pte |= _PAGE_ACCESSED_4U;
2640 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2641 pte |= _PAGE_MODIFIED_4U;
2642 pte |= _PAGE_CP_4U|_PAGE_CV_4U;
2643 }
2644
2645 return __pgprot(pte);
2646}
2647
2648void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 2606void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2649 pmd_t *pmd) 2607 pmd_t *pmd)
2650{ 2608{
2651 unsigned long pte, flags; 2609 unsigned long pte, flags;
2652 struct mm_struct *mm; 2610 struct mm_struct *mm;
2653 pmd_t entry = *pmd; 2611 pmd_t entry = *pmd;
2654 pgprot_t prot;
2655 2612
2656 if (!pmd_large(entry) || !pmd_young(entry)) 2613 if (!pmd_large(entry) || !pmd_young(entry))
2657 return; 2614 return;
2658 2615
2659 pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS); 2616 pte = pmd_val(entry);
2660 pte <<= PMD_PADDR_SHIFT;
2661 pte |= _PAGE_VALID;
2662
2663 prot = pmd_pgprot(entry);
2664
2665 if (tlb_type == hypervisor)
2666 pgprot_val(prot) |= _PAGE_SZHUGE_4V;
2667 else
2668 pgprot_val(prot) |= _PAGE_SZHUGE_4U;
2669 2617
2670 pte |= pgprot_val(prot); 2618 /* We are fabricating 8MB pages using 4MB real hw pages. */
2619 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2671 2620
2672 mm = vma->vm_mm; 2621 mm = vma->vm_mm;
2673 2622
2674 spin_lock_irqsave(&mm->context.lock, flags); 2623 spin_lock_irqsave(&mm->context.lock, flags);
2675 2624
2676 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) 2625 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2677 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, 2626 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2678 addr, pte); 2627 addr, pte);
2679 2628
2680 spin_unlock_irqrestore(&mm->context.lock, flags); 2629 spin_unlock_irqrestore(&mm->context.lock, flags);
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 0661aa606dec..5d3782deb403 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -1,11 +1,13 @@
1#ifndef _SPARC64_MM_INIT_H 1#ifndef _SPARC64_MM_INIT_H
2#define _SPARC64_MM_INIT_H 2#define _SPARC64_MM_INIT_H
3 3
4#include <asm/page.h>
5
4/* Most of the symbols in this file are defined in init.c and 6/* Most of the symbols in this file are defined in init.c and
5 * marked non-static so that assembler code can get at them. 7 * marked non-static so that assembler code can get at them.
6 */ 8 */
7 9
8#define MAX_PHYS_ADDRESS (1UL << 41UL) 10#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
9#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) 11#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
10#define KPTE_BITMAP_BYTES \ 12#define KPTE_BITMAP_BYTES \
11 ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4) 13 ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5d721df48a72..869023abe5a4 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -345,7 +345,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
345 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) 345 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
346 return NULL; 346 return NULL;
347 page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); 347 page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
348 pgtable_page_ctor(page); 348 if (!pgtable_page_ctor(page)) {
349 __free_page(page);
350 return NULL;
351 }
349 return page; 352 return page;
350} 353}
351 354
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7a91f288c708..ad3bf4b4324d 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -161,8 +161,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
161 if (mm == &init_mm) 161 if (mm == &init_mm)
162 return; 162 return;
163 163
164 if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) { 164 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
165 if (pmd_val(pmd) & PMD_ISHUGE) 165 if (pmd_val(pmd) & _PAGE_PMD_HUGE)
166 mm->context.huge_pte_count++; 166 mm->context.huge_pte_count++;
167 else 167 else
168 mm->context.huge_pte_count--; 168 mm->context.huge_pte_count--;
@@ -178,13 +178,16 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
178 } 178 }
179 179
180 if (!pmd_none(orig)) { 180 if (!pmd_none(orig)) {
181 bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0); 181 pte_t orig_pte = __pte(pmd_val(orig));
182 bool exec = pte_exec(orig_pte);
182 183
183 addr &= HPAGE_MASK; 184 addr &= HPAGE_MASK;
184 if (pmd_val(orig) & PMD_ISHUGE) 185 if (pmd_trans_huge(orig)) {
185 tlb_batch_add_one(mm, addr, exec); 186 tlb_batch_add_one(mm, addr, exec);
186 else 187 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
188 } else {
187 tlb_batch_pmd_scan(mm, addr, orig, exec); 189 tlb_batch_pmd_scan(mm, addr, orig, exec);
190 }
188 } 191 }
189} 192}
190 193
@@ -196,11 +199,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
196 assert_spin_locked(&mm->page_table_lock); 199 assert_spin_locked(&mm->page_table_lock);
197 200
198 /* FIFO */ 201 /* FIFO */
199 if (!mm->pmd_huge_pte) 202 if (!pmd_huge_pte(mm, pmdp))
200 INIT_LIST_HEAD(lh); 203 INIT_LIST_HEAD(lh);
201 else 204 else
202 list_add(lh, (struct list_head *) mm->pmd_huge_pte); 205 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
203 mm->pmd_huge_pte = pgtable; 206 pmd_huge_pte(mm, pmdp) = pgtable;
204} 207}
205 208
206pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) 209pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -211,12 +214,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
211 assert_spin_locked(&mm->page_table_lock); 214 assert_spin_locked(&mm->page_table_lock);
212 215
213 /* FIFO */ 216 /* FIFO */
214 pgtable = mm->pmd_huge_pte; 217 pgtable = pmd_huge_pte(mm, pmdp);
215 lh = (struct list_head *) pgtable; 218 lh = (struct list_head *) pgtable;
216 if (list_empty(lh)) 219 if (list_empty(lh))
217 mm->pmd_huge_pte = NULL; 220 pmd_huge_pte(mm, pmdp) = NULL;
218 else { 221 else {
219 mm->pmd_huge_pte = (pgtable_t) lh->next; 222 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
220 list_del(lh); 223 list_del(lh);
221 } 224 }
222 pte_val(pgtable[0]) = 0; 225 pte_val(pgtable[0]) = 0;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce5ee91..3b3a360b429a 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb)
87 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; 87 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
88 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 88 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
89 base = __pa(base); 89 base = __pa(base);
90 __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries); 90 __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
91 } 91 }
92#endif 92#endif
93 spin_unlock_irqrestore(&mm->context.lock, flags); 93 spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
111 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; 111 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
112 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 112 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
113 base = __pa(base); 113 base = __pa(base);
114 __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); 114 __flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
115 } 115 }
116#endif 116#endif
117 spin_unlock_irqrestore(&mm->context.lock, flags); 117 spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -472,8 +472,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
472 mm->context.huge_pte_count = 0; 472 mm->context.huge_pte_count = 0;
473#endif 473#endif
474 474
475 mm->context.pgtable_page = NULL;
476
477 /* copy_mm() copies over the parent's mm_struct before calling 475 /* copy_mm() copies over the parent's mm_struct before calling
478 * us, so we need to zero out the TSB pointer or else tsb_grow() 476 * us, so we need to zero out the TSB pointer or else tsb_grow()
479 * will be confused and think there is an older TSB to free up. 477 * will be confused and think there is an older TSB to free up.
@@ -512,17 +510,10 @@ static void tsb_destroy_one(struct tsb_config *tp)
512void destroy_context(struct mm_struct *mm) 510void destroy_context(struct mm_struct *mm)
513{ 511{
514 unsigned long flags, i; 512 unsigned long flags, i;
515 struct page *page;
516 513
517 for (i = 0; i < MM_NUM_TSBS; i++) 514 for (i = 0; i < MM_NUM_TSBS; i++)
518 tsb_destroy_one(&mm->context.tsb_block[i]); 515 tsb_destroy_one(&mm->context.tsb_block[i]);
519 516
520 page = mm->context.pgtable_page;
521 if (page && put_page_testzero(page)) {
522 pgtable_page_dtor(page);
523 free_hot_cold_page(page, 0);
524 }
525
526 spin_lock_irqsave(&ctx_alloc_lock, flags); 517 spin_lock_irqsave(&ctx_alloc_lock, flags);
527 518
528 if (CTX_VALID(mm->context)) { 519 if (CTX_VALID(mm->context)) {
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 432aa0cb1b38..b4f4733abc6e 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -153,10 +153,10 @@ __spitfire_flush_tlb_mm_slow:
153 .globl __flush_icache_page 153 .globl __flush_icache_page
154__flush_icache_page: /* %o0 = phys_page */ 154__flush_icache_page: /* %o0 = phys_page */
155 srlx %o0, PAGE_SHIFT, %o0 155 srlx %o0, PAGE_SHIFT, %o0
156 sethi %uhi(PAGE_OFFSET), %g1 156 sethi %hi(PAGE_OFFSET), %g1
157 sllx %o0, PAGE_SHIFT, %o0 157 sllx %o0, PAGE_SHIFT, %o0
158 sethi %hi(PAGE_SIZE), %g2 158 sethi %hi(PAGE_SIZE), %g2
159 sllx %g1, 32, %g1 159 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
160 add %o0, %g1, %o0 160 add %o0, %g1, %o0
1611: subcc %g2, 32, %g2 1611: subcc %g2, 32, %g2
162 bne,pt %icc, 1b 162 bne,pt %icc, 1b
@@ -178,8 +178,8 @@ __flush_icache_page: /* %o0 = phys_page */
178 .align 64 178 .align 64
179 .globl __flush_dcache_page 179 .globl __flush_dcache_page
180__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ 180__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
181 sethi %uhi(PAGE_OFFSET), %g1 181 sethi %hi(PAGE_OFFSET), %g1
182 sllx %g1, 32, %g1 182 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
183 sub %o0, %g1, %o0 ! physical address 183 sub %o0, %g1, %o0 ! physical address
184 srlx %o0, 11, %o0 ! make D-cache TAG 184 srlx %o0, 11, %o0 ! make D-cache TAG
185 sethi %hi(1 << 14), %o2 ! D-cache size 185 sethi %hi(1 << 14), %o2 ! D-cache size
@@ -287,8 +287,8 @@ __cheetah_flush_tlb_pending: /* 27 insns */
287 287
288#ifdef DCACHE_ALIASING_POSSIBLE 288#ifdef DCACHE_ALIASING_POSSIBLE
289__cheetah_flush_dcache_page: /* 11 insns */ 289__cheetah_flush_dcache_page: /* 11 insns */
290 sethi %uhi(PAGE_OFFSET), %g1 290 sethi %hi(PAGE_OFFSET), %g1
291 sllx %g1, 32, %g1 291 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
292 sub %o0, %g1, %o0 292 sub %o0, %g1, %o0
293 sethi %hi(PAGE_SIZE), %o4 293 sethi %hi(PAGE_SIZE), %o4
2941: subcc %o4, (1 << 5), %o4 2941: subcc %o4, (1 << 5), %o4