author    Steven J. Hill <Steven.Hill@imgtec.com>	2015-02-26 19:16:38 -0500
committer Ralf Baechle <ralf@linux-mips.org>	2015-03-19 12:39:49 -0400
commit    c5b367835cfc7a8ef53b9670a409ffcc95194344
tree      23a6be89021f93b09bb0f2340bc995c21bcab79b /arch/mips/mm
parent    be0c37c985eddc46d0d67543898c086f60460e2e
MIPS: Add support for XPA.

Add support for extended physical addressing (XPA) so that 32-bit
platforms can address 40 bits or more of physical address space.

NOTE:
1) XPA and EVA are not the same and cannot be used simultaneously.
2) If you configure your kernel for XPA, the PTEs and all address
   sizes become 64-bit.
3) Your platform MUST have working HIGHMEM support.

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
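For orientation before the diffs: with XPA the kernel keeps 64-bit PTEs on
32-bit cores, split across two 32-bit words. The patch loads the conventional
PFN-plus-flags word into EntryLo and the extra physical-address bits (the
PFNX field, masked by _PFNX_MASK) into the upper half of EntryLo via MTHC0.
Below is a minimal userspace sketch of that split; the field positions and
the mask value are illustrative assumptions, not the kernel's exact layout.

    /* Illustrative model of splitting a >32-bit physical address the
     * way an XPA PTE's pair of words carries it (assumed layout). */
    #include <stdio.h>
    #include <stdint.h>

    #define PFNX_MASK 0x00ffffffUL  /* assumed stand-in for _PFNX_MASK */

    int main(void)
    {
            uint64_t phys = (1ULL << 36) | 0x1234000ULL;    /* a 37-bit PA */
            uint32_t pte_high = (uint32_t)(phys >> 12);     /* PFN word -> EntryLo */
            uint32_t pte_low = (uint32_t)((phys >> 32) & PFNX_MASK); /* PFNX bits */

            printf("EntryLo  (mtc0)  <- %#lx\n", (unsigned long)pte_high);
            printf("EntryLoX (mthc0) <- %#lx\n", (unsigned long)pte_low);
            return 0;
    }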
Diffstat (limited to 'arch/mips/mm')

 arch/mips/mm/init.c    |  7
 arch/mips/mm/tlb-r4k.c | 12
 arch/mips/mm/tlbex.c   | 90
 3 files changed, 95 insertions(+), 14 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 448cde372af0..faa5c9822ecc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
 	pte = mk_pte(page, prot);
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-	entrylo = pte.pte_high;
+	entrylo = pte_to_entrylo(pte.pte_high);
 #else
 	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+	entrylo = (pte.pte_low & _PFNX_MASK);
+	writex_c0_entrylo0(entrylo);
+	writex_c0_entrylo1(entrylo);
+#endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
 	write_c0_index(tlbidx);
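The three writex_c0_entrylo* lines above are the XPA-specific half of the
wired-entry setup: with XPA, EntryLo0/1 are wider than 32 bits, and the extra
bits are written through MTHC0. A kernel-context sketch of the pairing, using
the two accessors exactly as the patch does (not standalone code):

    /* Sketch: program one full EntryLo on a 32-bit XPA core. */
    static void load_entrylo0(unsigned long entrylo, unsigned long pfnx)
    {
            write_c0_entrylo0(entrylo);     /* low word: PFN + C/D/V/G (mtc0)  */
            writex_c0_entrylo0(pfnx);       /* high word: PA bits >31 (mthc0)  */
    }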
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index b2afa49beab0..c2500f4cb1d1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	ptep = pte_offset_map(pmdp, address);
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+	write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+	writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+	ptep++;
+	write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+	writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
 	write_c0_entrylo0(ptep->pte_high);
 	ptep++;
 	write_c0_entrylo1(ptep->pte_high);
+#endif
 #else
 	write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
 	write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
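The ptep++ between the two EntryLo writes reflects a MIPS TLB invariant: each
entry translates an even/odd pair of virtual pages (EntryHi holds the shared
VPN2, masked with PAGE_MASK << 1 as in the init.c hunk above), so the fault
path always loads two adjacent PTEs. A small self-contained sketch of the
pairing math, assuming 4 KiB pages:

    #include <stdint.h>

    #define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
    #define PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))

    /* One TLB entry covers the aligned page pair containing 'va';
     * bit PAGE_SHIFT picks EntryLo0 (even) or EntryLo1 (odd). */
    static unsigned long tlb_pair_base(unsigned long va)
    {
            return va & (PAGE_MASK << 1);   /* same masking as EntryHi setup */
    }

    static int tlb_pair_is_odd(unsigned long va)
    {
            return (int)((va >> PAGE_SHIFT) & 1);
    }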
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
 {
+#ifdef CONFIG_XPA
+	panic("Broken for XPA kernels");
+#else
 	unsigned long flags;
 	unsigned long wired;
 	unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
 	local_irq_restore(flags);
+#endif
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 20d985901e44..7709920e0cef 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,17 @@
 #include <asm/uasm.h>
 #include <asm/setup.h>
 
+static int __cpuinitdata mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+	mips_xpa_disabled = 1;
+
+	return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
 /*
  * TLB load/store/modify handlers.
  *
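A note on the new knob: __setup("noxpa", xpa_disable) registers a kernel
command-line parameter, so an XPA-enabled kernel can still be booted with XPA
turned off by appending the flag to the boot arguments, for example
(illustrative command line):

    console=ttyS0,115200 root=/dev/sda1 noxpa

config_xpa_params() further down checks mips_xpa_disabled before touching
PageGrain.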
@@ -1027,12 +1038,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+		const int scratch = 1; /* Our extra working register */
 
-		/* The pte entries are pre-shifted */
-		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+		uasm_i_addu(p, scratch, 0, ptep);
+#endif
+		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+		uasm_i_lw(p, tmp, 0, scratch);
+		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+		uasm_i_lui(p, scratch, 0xff);
+		uasm_i_ori(p, scratch, scratch, 0xffff);
+		uasm_i_and(p, tmp, scratch, tmp);
+		uasm_i_and(p, ptep, scratch, ptep);
+		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
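Two things happen in the rewritten sequence: both PTE words are now loaded
before the CP0 writes, and the emitted ROTR moves the software _PAGE_GLOBAL
bit into the EntryLo G position, mirroring what pte_to_entrylo() does on the
C side for RIXI-capable CPUs. The lui 0xff / ori 0xffff pair simply
materialises the 0x00ffffff mask applied before the MTHC0 writes. The
rotation itself, as plain C (sketch; valid only for 0 < n < 32):

    #include <stdint.h>

    /* Rotate right: ROTR rd, rs, n. Bit n lands at bit 0 while the
     * bits below n wrap to the top, so a flag stored at bit n of the
     * PTE ends up in EntryLo bit 0 (the G bit). */
    static uint32_t rotr32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));      /* assumes 0 < n < 32 */
    }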
@@ -1533,8 +1559,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
 
+	if (!cpu_has_64bits) {
+		const int scratch = 1; /* Our extra working register */
+
+		uasm_i_lui(p, scratch, (mode >> 16));
+		uasm_i_or(p, pte, pte, scratch);
+	} else
+#endif
 	uasm_i_ori(p, pte, pte, mode);
 #ifdef CONFIG_SMP
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
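The iPTE_SW change works around an ISA limit: ori carries only a 16-bit
zero-extended immediate, so once flag bits sit above bit 15 (as with the XPA
PTE layout on !cpu_has_64bits cores) they must be built with lui and merged
with or. A compilable sketch of the two paths; it assumes, as the kernel path
effectively does, that the relevant mode bits live wholly in one half of the
word:

    #include <stdint.h>

    static uint32_t pte_set_mode(uint32_t pte, uint32_t mode)
    {
            if (mode >> 16)                         /* beyond ori's reach */
                    return pte | ((mode >> 16) << 16);      /* lui + or  */
            return pte | (mode & 0xffff);                   /* plain ori */
    }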
@@ -1598,15 +1630,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
 		} else {
-			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+			uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+			uasm_i_andi(p, t, t, 1);
 			uasm_il_beqz(p, r, t, lid);
 			if (pte == t)
 				/* You lose the SMP race :-(*/
 				iPTE_LW(p, pte, ptr);
 		}
 	} else {
-		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+		uasm_i_andi(p, t, t, 3);
+		uasm_i_xori(p, t, t, 3);
 		uasm_il_bnez(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
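The same immediate-width problem shows up in the PTE tests: the relocated
flag bits can no longer be reached by a 16-bit andi mask directly, so the pte
word is shifted down by _PAGE_PRESENT_SHIFT first. After the shift, bit 0 is
_PAGE_PRESENT and bit 1 _PAGE_READ (hence masks 1 and 3; the 5 in
build_pte_writable below evidently adds a write bit two positions up), and
the xori turns "both bits set" into zero for the branch. Equivalent C
(sketch; the shift constant is a stand-in):

    #include <stdint.h>

    #define PRESENT_SHIFT 24    /* assumed stand-in for _PAGE_PRESENT_SHIFT */

    /* Mirrors the srl/andi/xori/bnez sequence: nonzero means the page
     * is missing or unreadable and the handler should branch out. */
    static uint32_t pte_not_present_or_unreadable(uint32_t pte)
    {
            return ((pte >> PRESENT_SHIFT) & 3) ^ 3;
    }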
@@ -1635,8 +1669,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 {
 	int t = scratch >= 0 ? scratch : pte;
 
-	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+	uasm_i_andi(p, t, t, 5);
+	uasm_i_xori(p, t, t, 5);
 	uasm_il_bnez(p, r, t, lid);
 	if (pte == t)
 		/* You lose the SMP race :-(*/
@@ -1672,7 +1707,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		uasm_i_nop(p);
 	} else {
 		int t = scratch >= 0 ? scratch : pte;
-		uasm_i_andi(p, t, pte, _PAGE_WRITE);
+		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+		uasm_i_andi(p, t, t, 1);
 		uasm_il_beqz(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
@@ -2285,6 +2321,11 @@ static void config_htw_params(void)
 
 	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
 	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+	/* If XPA has been enabled, PTEs are 64-bit in size. */
+	if (read_c0_pagegrain() & PG_ELPA)
+		pwsize |= 1;
+
 	write_c0_pwsize(pwsize);
 
 	/* Make sure everything is set before we enable the HTW */
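The guard here reads the live PG_ELPA state rather than testing CONFIG_XPA at
build time, so the hardware page-table walker is programmed for 64-bit PTEs
only when XPA was actually enabled at runtime (it may have been turned off
with noxpa). Per the comment, setting the low PWSize bit is how this patch
tells the walker the PTE stride doubled.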
@@ -2298,6 +2339,28 @@ static void config_htw_params(void)
 	print_htw_config();
 }
 
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+	unsigned int pagegrain;
+
+	if (mips_xpa_disabled) {
+		pr_info("Extended Physical Addressing (XPA) disabled\n");
+		return;
+	}
+
+	pagegrain = read_c0_pagegrain();
+	write_c0_pagegrain(pagegrain | PG_ELPA);
+	back_to_back_c0_hazard();
+	pagegrain = read_c0_pagegrain();
+
+	if (pagegrain & PG_ELPA)
+		pr_info("Extended Physical Addressing (XPA) enabled\n");
+	else
+		panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
 void build_tlb_refill_handler(void)
 {
 	/*
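config_xpa_params() is a write-then-verify probe: set PG_ELPA, let the CP0
write settle (back_to_back_c0_hazard()), then read PageGrain back. If the bit
did not stick, the hardware cannot do extended addressing, and a kernel built
to require XPA has no fallback but to panic. The pattern, generalised as a
sketch (reg is a hypothetical stand-in for the config register):

    #include <stdint.h>

    static volatile uint32_t reg;       /* models a R/W config register */

    /* Returns 1 if the hardware kept the feature bit after a
     * write-then-read-back probe (unsupported bits read back as 0). */
    static int enable_feature_bit(uint32_t bit)
    {
            reg |= bit;
            /* a hazard barrier would sit here on real hardware */
            return (reg & bit) != 0;
    }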
@@ -2362,8 +2425,9 @@ void build_tlb_refill_handler(void)
 		}
 		if (cpu_has_local_ebase)
 			build_r4000_tlb_refill_handler();
+		if (cpu_has_xpa)
+			config_xpa_params();
 		if (cpu_has_htw)
 			config_htw_params();
-
 	}
 }
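Worth noting: config_xpa_params() is called before config_htw_params(), so by
the time the hardware-walker setup tests PG_ELPA (the pwsize change above),
PageGrain already reflects whether XPA was actually enabled, including the
noxpa case.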