author     Steven J. Hill <Steven.Hill@imgtec.com>     2015-02-26 19:16:38 -0500
committer  Ralf Baechle <ralf@linux-mips.org>          2015-03-19 12:39:49 -0400
commit     c5b367835cfc7a8ef53b9670a409ffcc95194344 (patch)
tree       23a6be89021f93b09bb0f2340bc995c21bcab79b /arch
parent     be0c37c985eddc46d0d67543898c086f60460e2e (diff)
MIPS: Add support for XPA.
Add support for extended physical addressing (XPA) so that 32-bit
platforms can access equal to or greater than 40 bits of physical
addresses.

NOTE:
  1) XPA and EVA are not the same and cannot be used simultaneously.
  2) If you configure your kernel for XPA, the PTEs and all address
     sizes become 64-bit.
  3) Your platform MUST have working HIGHMEM support.

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
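The sketch below is not part of the patch; it is a plain user-space model of how the reworked pfn_pte()/pte_pfn() in pgtable-32.h split a page frame number that no longer fits in 32 bits across the two halves of a pte_t: the low PFN bits sit in pte_high next to the hardware flag bits, and the remaining PFN bits sit in pte_low's 24-bit PFNX field below the software flags. The TOY_* constants are simplified stand-ins for _PFN_SHIFT, _PAGE_PRESENT_SHIFT and _PFNX_MASK, chosen only so the round trip shown here is lossless.

/*
 * Illustrative user-space model only -- not part of this patch. The real
 * constants live in arch/mips/include/asm/pgtable-bits.h; the TOY_* values
 * below are simplified stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PFN_SHIFT	6		/* stand-in for _PFN_SHIFT */
#define TOY_PFNX_SHIFT	24		/* stand-in for _PAGE_PRESENT_SHIFT */
#define TOY_PFNX_MASK	0xffffff	/* stand-in for _PFNX_MASK */

struct toy_pte {
	uint32_t pte_low;	/* software flags + high PFN bits (PFNX field) */
	uint32_t pte_high;	/* hardware EntryLo bits + low PFN bits */
};

static struct toy_pte toy_pfn_pte(uint64_t pfn, uint32_t hw_prot, uint32_t sw_prot)
{
	struct toy_pte pte;

	/* low 24 PFN bits go into pte_high, above the hardware flag bits */
	pte.pte_high = (uint32_t)((pfn & TOY_PFNX_MASK) << TOY_PFN_SHIFT) | hw_prot;
	/* remaining PFN bits go into pte_low's PFNX field, below the software flags */
	pte.pte_low = (uint32_t)((pfn >> TOY_PFNX_SHIFT) & TOY_PFNX_MASK) | sw_prot;
	return pte;
}

static uint64_t toy_pte_pfn(struct toy_pte pte)
{
	return ((pte.pte_high >> TOY_PFN_SHIFT) & TOY_PFNX_MASK) |
	       ((uint64_t)(pte.pte_low & TOY_PFNX_MASK) << TOY_PFNX_SHIFT);
}

int main(void)
{
	/* a frame number that needs more than 32 physical address bits */
	uint64_t pfn = 0x123456789ULL;
	struct toy_pte pte = toy_pfn_pte(pfn, 0x2f, 1u << 24);

	assert(toy_pte_pfn(pte) == pfn);
	printf("pte_high=%#x pte_low=%#x -> pfn=%#llx\n",
	       (unsigned)pte.pte_high, (unsigned)pte.pte_low,
	       (unsigned long long)toy_pte_pfn(pte));
	return 0;
}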
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kconfig                        35
-rw-r--r--  arch/mips/include/asm/cpu-features.h      3
-rw-r--r--  arch/mips/include/asm/cpu.h               1
-rw-r--r--  arch/mips/include/asm/pgtable-32.h       15
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h     13
-rw-r--r--  arch/mips/include/asm/pgtable.h          36
-rw-r--r--  arch/mips/kernel/cpu-probe.c              4
-rw-r--r--  arch/mips/kernel/proc.c                   1
-rw-r--r--  arch/mips/mm/init.c                       7
-rw-r--r--  arch/mips/mm/tlb-r4k.c                   12
-rw-r--r--  arch/mips/mm/tlbex.c                     90
11 files changed, 173 insertions, 44 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c7a16904cd03..69a3b0fab926 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -377,6 +377,7 @@ config MIPS_MALTA
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R5
 	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
@@ -386,6 +387,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MICROMIPS
 	select SYS_SUPPORTS_MIPS_CMP
@@ -1596,6 +1598,33 @@ config CPU_MIPS32_3_5_EVA
 	  One of its primary benefits is an increase in the maximum size
 	  of lowmem (up to 3GB). If unsure, say 'N' here.
 
+config CPU_MIPS32_R5_FEATURES
+	bool "MIPS32 Release 5 Features"
+	depends on SYS_HAS_CPU_MIPS32_R5
+	depends on CPU_MIPS32_R2
+	help
+	  Choose this option to build a kernel for release 2 or later of the
+	  MIPS32 architecture including features from release 5 such as
+	  support for Extended Physical Addressing (XPA).
+
+config CPU_MIPS32_R5_XPA
+	bool "Extended Physical Addressing (XPA)"
+	depends on CPU_MIPS32_R5_FEATURES
+	depends on !EVA
+	depends on !PAGE_SIZE_4KB
+	depends on SYS_SUPPORTS_HIGHMEM
+	select XPA
+	select HIGHMEM
+	select ARCH_PHYS_ADDR_T_64BIT
+	default n
+	help
+	  Choose this option if you want to enable the Extended Physical
+	  Addressing (XPA) on your MIPS32 core (such as P5600 series). The
+	  benefit is to increase physical addressing equal to or greater
+	  than 40 bits. Note that this has the side effect of turning on
+	  64-bit addressing which in turn makes the PTEs 64-bit in size.
+	  If unsure, say 'N' here.
+
 if CPU_LOONGSON2F
 config CPU_NOP_WORKAROUNDS
 	bool
@@ -1699,6 +1728,9 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool
 
+config SYS_HAS_CPU_MIPS32_R5
+	bool
+
 config SYS_HAS_CPU_MIPS32_R6
 	bool
 
@@ -1836,6 +1868,9 @@ config CPU_MIPSR6
 config EVA
 	bool
 
+config XPA
+	bool
+
 config SYS_SUPPORTS_32BIT_KERNEL
 	bool
 config SYS_SUPPORTS_64BIT_KERNEL
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 0d8208de9a3f..a324751b02ff 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -139,6 +139,9 @@
 # endif
 #endif
 
+#ifndef cpu_has_xpa
+#define cpu_has_xpa		(cpu_data[0].options & MIPS_CPU_XPA)
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 15687234d70a..e492c740bb94 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -377,6 +377,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_MAAR		0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE		0x800000000ull /* FRE & UFE bits implemented */
 #define MIPS_CPU_RW_LLB		0x1000000000ull /* LLADDR/LLB writes are allowed */
+#define MIPS_CPU_XPA		0x2000000000ull /* CPU supports Extended Physical Addressing */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index a6be006b6f75..7d56686c0e62 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
-#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
+#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pte;
-	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
-	pte.pte_low = pgprot_val(prot);
+
+	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
+				(pgprot_val(prot) & ~_PFNX_MASK);
+	pte.pte_high = (pfn << _PFN_SHIFT) |
+				(pgprot_val(prot) & ~_PFN_MASK);
 	return pte;
 }
 
@@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
-#define __swp_type(x)			(((x).val >> 2) & 0x1f)
-#define __swp_offset(x)			((x).val >> 7)
-#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __swp_type(x)			(((x).val >> 4) & 0x1f)
+#define __swp_offset(x)			((x).val >> 9)
+#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
 
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 8e432a8ec4fe..18ae5ddef118 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -37,7 +37,11 @@
 /*
  * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT	0
+#define _PAGE_NO_EXEC_SHIFT	0
+#define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
@@ -49,7 +53,7 @@
 /*
  * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT	(_CACHE_SHIFT + 3)
+#define _PAGE_PRESENT_SHIFT	(24)
 #define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
@@ -62,6 +66,11 @@
 
 #define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK		0xffffff
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index e1fec0237ce2..bffd46ca3694 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -133,7 +133,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
 #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -142,16 +142,14 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 
-	if (pte.pte_low & _PAGE_GLOBAL) {
+	if (pte.pte_high & _PAGE_GLOBAL) {
 		pte_t *buddy = ptep_buddy(ptep);
 		/*
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
-		if (pte_none(*buddy)) {
-			buddy->pte_low |= _PAGE_GLOBAL;
+		if (pte_none(*buddy))
 			buddy->pte_high |= _PAGE_GLOBAL;
-		}
 	}
 }
 
@@ -161,8 +159,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 
 	htw_stop();
 	/* Preserve global status for the pair */
-	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
-		null.pte_low = null.pte_high = _PAGE_GLOBAL;
+	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+		null.pte_high = _PAGE_GLOBAL;
 
 	set_pte_at(mm, addr, ptep, null);
 	htw_start();
@@ -242,21 +240,21 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+	pte.pte_low  &= ~_PAGE_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+	pte.pte_low  &= ~_PAGE_MODIFIED;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+	pte.pte_low  &= ~_PAGE_ACCESSED;
 	pte.pte_high &= ~_PAGE_SILENT_READ;
 	return pte;
 }
@@ -264,30 +262,24 @@ static inline pte_t pte_mkold(pte_t pte)
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
-	if (pte.pte_low & _PAGE_MODIFIED) {
-		pte.pte_low |= _PAGE_SILENT_WRITE;
+	if (pte.pte_low & _PAGE_MODIFIED)
 		pte.pte_high |= _PAGE_SILENT_WRITE;
-	}
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
 	pte.pte_low |= _PAGE_MODIFIED;
-	if (pte.pte_low & _PAGE_WRITE) {
-		pte.pte_low |= _PAGE_SILENT_WRITE;
+	if (pte.pte_low & _PAGE_WRITE)
 		pte.pte_high |= _PAGE_SILENT_WRITE;
-	}
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
-	if (pte.pte_low & _PAGE_READ) {
-		pte.pte_low |= _PAGE_SILENT_READ;
+	if (pte.pte_low & _PAGE_READ)
 		pte.pte_high |= _PAGE_SILENT_READ;
-	}
 	return pte;
 }
 #else
@@ -391,10 +383,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pte.pte_low  &= _PAGE_CHG_MASK;
+	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
 	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
-	pte.pte_low  |= pgprot_val(newprot);
-	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
+	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
 	return pte;
 }
 #else
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 48dfb9de853d..ac96817d1f99 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -516,6 +516,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_MAAR;
 	if (config5 & MIPS_CONF5_LLB)
 		c->options |= MIPS_CPU_RW_LLB;
+#ifdef CONFIG_XPA
+	if (config5 & MIPS_CONF5_MVH)
+		c->options |= MIPS_CPU_XPA;
+#endif
 
 	return config5 & MIPS_CONF_M;
 }
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 130af7d26a9c..298b2b773d12 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_has_msa)	seq_printf(m, "%s", " msa");
 	if (cpu_has_eva)	seq_printf(m, "%s", " eva");
 	if (cpu_has_htw)	seq_printf(m, "%s", " htw");
+	if (cpu_has_xpa)	seq_printf(m, "%s", " xpa");
 	seq_printf(m, "\n");
 
 	if (cpu_has_mmips) {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 448cde372af0..faa5c9822ecc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
 	pte = mk_pte(page, prot);
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-	entrylo = pte.pte_high;
+	entrylo = pte_to_entrylo(pte.pte_high);
 #else
 	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+	entrylo = (pte.pte_low & _PFNX_MASK);
+	writex_c0_entrylo0(entrylo);
+	writex_c0_entrylo1(entrylo);
+#endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
 	write_c0_index(tlbidx);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index b2afa49beab0..c2500f4cb1d1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	ptep = pte_offset_map(pmdp, address);
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+	write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+	writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+	ptep++;
+	write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+	writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
 	write_c0_entrylo0(ptep->pte_high);
 	ptep++;
 	write_c0_entrylo1(ptep->pte_high);
+#endif
 #else
 	write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
 	write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
 {
+#ifdef CONFIG_XPA
+	panic("Broken for XPA kernels");
+#else
 	unsigned long flags;
 	unsigned long wired;
 	unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
 	local_irq_restore(flags);
+#endif
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 20d985901e44..7709920e0cef 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,17 @@
 #include <asm/uasm.h>
 #include <asm/setup.h>
 
+static int __cpuinitdata mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+	mips_xpa_disabled = 1;
+
+	return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
 /*
  * TLB load/store/modify handlers.
  *
@@ -1027,12 +1038,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+		const int scratch = 1; /* Our extra working register */
 
-		/* The pte entries are pre-shifted */
-		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+		uasm_i_addu(p, scratch, 0, ptep);
+#endif
+		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+		uasm_i_lw(p, tmp, 0, scratch);
+		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+		uasm_i_lui(p, scratch, 0xff);
+		uasm_i_ori(p, scratch, scratch, 0xffff);
+		uasm_i_and(p, tmp, scratch, tmp);
+		uasm_i_and(p, ptep, scratch, ptep);
+		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
@@ -1533,8 +1559,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
 
+	if (!cpu_has_64bits) {
+		const int scratch = 1; /* Our extra working register */
+
+		uasm_i_lui(p, scratch, (mode >> 16));
+		uasm_i_or(p, pte, pte, scratch);
+	} else
+#endif
 	uasm_i_ori(p, pte, pte, mode);
 #ifdef CONFIG_SMP
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
@@ -1598,15 +1630,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
 		} else {
-			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+			uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+			uasm_i_andi(p, t, t, 1);
 			uasm_il_beqz(p, r, t, lid);
 			if (pte == t)
 				/* You lose the SMP race :-(*/
 				iPTE_LW(p, pte, ptr);
 		}
 	} else {
-		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+		uasm_i_andi(p, t, t, 3);
+		uasm_i_xori(p, t, t, 3);
 		uasm_il_bnez(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
@@ -1635,8 +1669,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 {
 	int t = scratch >= 0 ? scratch : pte;
 
-	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+	uasm_i_andi(p, t, t, 5);
+	uasm_i_xori(p, t, t, 5);
 	uasm_il_bnez(p, r, t, lid);
 	if (pte == t)
 		/* You lose the SMP race :-(*/
@@ -1672,7 +1707,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		uasm_i_nop(p);
 	} else {
 		int t = scratch >= 0 ? scratch : pte;
-		uasm_i_andi(p, t, pte, _PAGE_WRITE);
+		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+		uasm_i_andi(p, t, t, 1);
 		uasm_il_beqz(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
@@ -2285,6 +2321,11 @@ static void config_htw_params(void)
 
 	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
 	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+	/* If XPA has been enabled, PTEs are 64-bit in size. */
+	if (read_c0_pagegrain() & PG_ELPA)
+		pwsize |= 1;
+
 	write_c0_pwsize(pwsize);
 
 	/* Make sure everything is set before we enable the HTW */
@@ -2298,6 +2339,28 @@ static void config_htw_params(void)
 	print_htw_config();
 }
 
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+	unsigned int pagegrain;
+
+	if (mips_xpa_disabled) {
+		pr_info("Extended Physical Addressing (XPA) disabled\n");
+		return;
+	}
+
+	pagegrain = read_c0_pagegrain();
+	write_c0_pagegrain(pagegrain | PG_ELPA);
+	back_to_back_c0_hazard();
+	pagegrain = read_c0_pagegrain();
+
+	if (pagegrain & PG_ELPA)
+		pr_info("Extended Physical Addressing (XPA) enabled\n");
+	else
+		panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
 void build_tlb_refill_handler(void)
 {
 	/*
@@ -2362,8 +2425,9 @@ void build_tlb_refill_handler(void)
 		}
 		if (cpu_has_local_ebase)
 			build_r4000_tlb_refill_handler();
+		if (cpu_has_xpa)
+			config_xpa_params();
 		if (cpu_has_htw)
 			config_htw_params();
-
 	}
 }
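A closing usage note, not taken from the commit: with CONFIG_CPU_MIPS32_R5_XPA enabled, the selected ARCH_PHYS_ADDR_T_64BIT makes phys_addr_t 64 bits wide on the 32-bit kernel, so platform code can describe memory or device registers above 4GB. The fragment below is a hypothetical illustration; the function name and base address are made up.

/*
 * Hypothetical illustration only: map a device sitting above the 4GB
 * boundary, which a MIPS32 kernel can only reach once XPA is enabled
 * (CONFIG_CPU_MIPS32_R5_XPA selects ARCH_PHYS_ADDR_T_64BIT).
 */
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *example_map_high_regs(void)
{
	phys_addr_t base = 0x1000000000ULL;	/* 64GB: needs more than 32 physical address bits */

	return ioremap(base, 0x1000);
}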