summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorPaul Burton <paul.burton@imgtec.com>2016-04-19 04:25:05 -0400
committerRalf Baechle <ralf@linux-mips.org>2016-05-13 09:30:25 -0400
commit7b2cb64f91f25a7293b10054e20d1c0734ffab6f (patch)
tree6631b4d6dc4e3294bfecd1835000578ef374bd0f
parent745f35587846249b392aa548b4c5f54cd69ed688 (diff)
MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)
There are 2 distinct cases in which a kernel for a MIPS32 CPU (CONFIG_CPU_MIPS32=y) may use 64 bit physical addresses (CONFIG_PHYS_ADDR_T_64BIT=y):

- 36 bit physical addressing as used by RMI Alchemy & Netlogic XLP/XLR CPUs.
- MIPS32r5 eXtended Physical Addressing (XPA).

These 2 cases are distinct in that they require different behaviour from the kernel - the EntryLo registers have different formats. Until Linux v4.1 we only supported the first case, with code conditional upon the 2 aforementioned Kconfig variables being set. Commit c5b367835cfc ("MIPS: Add support for XPA.") added support for the second case, but did so by modifying the code that existed for the first case rather than treating the 2 cases as distinct. Since the EntryLo registers have different formats this breaks the 36 bit Alchemy/XLP/XLR case. Fix this by splitting the 2 cases, with XPA cases now being conditional upon CONFIG_XPA and the non-XPA case matching the code as it existed prior to commit c5b367835cfc ("MIPS: Add support for XPA.").

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reported-by: Manuel Lauss <manuel.lauss@gmail.com>
Tested-by: Manuel Lauss <manuel.lauss@gmail.com>
Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: stable@vger.kernel.org # v4.1+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13119/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--arch/mips/include/asm/pgtable-32.h27
-rw-r--r--arch/mips/include/asm/pgtable-bits.h29
-rw-r--r--arch/mips/include/asm/pgtable.h57
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/tlbex.c35
5 files changed, 125 insertions, 27 deletions
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 181bd8e7db7d..d21f3da7bdb6 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp)
103 pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); 103 pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
104} 104}
105 105
106#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 106#if defined(CONFIG_XPA)
107 107
108#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) 108#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
109static inline pte_t 109static inline pte_t
@@ -118,6 +118,20 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
118 return pte; 118 return pte;
119} 119}
120 120
121#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
122
123#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
124
125static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
126{
127 pte_t pte;
128
129 pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
130 pte.pte_low = pgprot_val(prot);
131
132 return pte;
133}
134
121#else 135#else
122 136
123#ifdef CONFIG_CPU_VR41XX 137#ifdef CONFIG_CPU_VR41XX
@@ -166,7 +180,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
166 180
167#else 181#else
168 182
169#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 183#if defined(CONFIG_XPA)
170 184
171/* Swap entries must have VALID and GLOBAL bits cleared. */ 185/* Swap entries must have VALID and GLOBAL bits cleared. */
172#define __swp_type(x) (((x).val >> 4) & 0x1f) 186#define __swp_type(x) (((x).val >> 4) & 0x1f)
@@ -175,6 +189,15 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
175#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 189#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
176#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 190#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
177 191
192#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
193
194/* Swap entries must have VALID and GLOBAL bits cleared. */
195#define __swp_type(x) (((x).val >> 2) & 0x1f)
196#define __swp_offset(x) ((x).val >> 7)
197#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
198#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
199#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
200
178#else 201#else
179/* 202/*
180 * Constraints: 203 * Constraints:
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 5bc663d330d2..58e8bf815404 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,11 +32,11 @@
32 * unpredictable things. The code (when it is written) to deal with 32 * unpredictable things. The code (when it is written) to deal with
33 * this problem will be in the update_mmu_cache() code for the r4k. 33 * this problem will be in the update_mmu_cache() code for the r4k.
34 */ 34 */
35#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 35#if defined(CONFIG_XPA)
36 36
37/* 37/*
38 * Page table bit offsets used for 64 bit physical addressing on MIPS32, 38 * Page table bit offsets used for 64 bit physical addressing on
39 * for example with Alchemy, Netlogic XLP/XLR or XPA. 39 * MIPS32r5 with XPA.
40 */ 40 */
41enum pgtable_bits { 41enum pgtable_bits {
42 /* Used by TLB hardware (placed in EntryLo*) */ 42 /* Used by TLB hardware (placed in EntryLo*) */
@@ -59,6 +59,27 @@ enum pgtable_bits {
59 */ 59 */
60#define _PFNX_MASK 0xffffff 60#define _PFNX_MASK 0xffffff
61 61
62#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
63
64/*
65 * Page table bit offsets used for 36 bit physical addressing on MIPS32,
66 * for example with Alchemy or Netlogic XLP/XLR.
67 */
68enum pgtable_bits {
69 /* Used by TLB hardware (placed in EntryLo*) */
70 _PAGE_GLOBAL_SHIFT,
71 _PAGE_VALID_SHIFT,
72 _PAGE_DIRTY_SHIFT,
73 _CACHE_SHIFT,
74
75 /* Used only by software (masked out before writing EntryLo*) */
76 _PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
77 _PAGE_NO_READ_SHIFT,
78 _PAGE_WRITE_SHIFT,
79 _PAGE_ACCESSED_SHIFT,
80 _PAGE_MODIFIED_SHIFT,
81};
82
62#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 83#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
63 84
64/* Page table bits used for r3k systems */ 85/* Page table bits used for r3k systems */
@@ -116,7 +137,7 @@ enum pgtable_bits {
116#endif 137#endif
117 138
118/* Used by TLB hardware (placed in EntryLo*) */ 139/* Used by TLB hardware (placed in EntryLo*) */
119#if (defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)) 140#if defined(CONFIG_XPA)
120# define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT) 141# define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
121#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 142#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
122# define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0) 143# define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index f6de4f49b1b2..e07a105cafc2 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -133,7 +133,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
133 133
134#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 134#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
135 135
136#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) 136#ifdef CONFIG_XPA
137# define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
138#else
139# define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
140#endif
141
137#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) 142#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
138#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC) 143#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC)
139 144
@@ -143,14 +148,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
143 smp_wmb(); 148 smp_wmb();
144 ptep->pte_low = pte.pte_low; 149 ptep->pte_low = pte.pte_low;
145 150
151#ifdef CONFIG_XPA
146 if (pte.pte_high & _PAGE_GLOBAL) { 152 if (pte.pte_high & _PAGE_GLOBAL) {
153#else
154 if (pte.pte_low & _PAGE_GLOBAL) {
155#endif
147 pte_t *buddy = ptep_buddy(ptep); 156 pte_t *buddy = ptep_buddy(ptep);
148 /* 157 /*
149 * Make sure the buddy is global too (if it's !none, 158 * Make sure the buddy is global too (if it's !none,
150 * it better already be global) 159 * it better already be global)
151 */ 160 */
152 if (pte_none(*buddy)) 161 if (pte_none(*buddy)) {
162 if (!config_enabled(CONFIG_XPA))
163 buddy->pte_low |= _PAGE_GLOBAL;
153 buddy->pte_high |= _PAGE_GLOBAL; 164 buddy->pte_high |= _PAGE_GLOBAL;
165 }
154 } 166 }
155} 167}
156 168
@@ -160,8 +172,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
160 172
161 htw_stop(); 173 htw_stop();
162 /* Preserve global status for the pair */ 174 /* Preserve global status for the pair */
163 if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) 175 if (config_enabled(CONFIG_XPA)) {
164 null.pte_high = _PAGE_GLOBAL; 176 if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
177 null.pte_high = _PAGE_GLOBAL;
178 } else {
179 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
180 null.pte_low = null.pte_high = _PAGE_GLOBAL;
181 }
165 182
166 set_pte_at(mm, addr, ptep, null); 183 set_pte_at(mm, addr, ptep, null);
167 htw_start(); 184 htw_start();
@@ -302,6 +319,8 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
302static inline pte_t pte_wrprotect(pte_t pte) 319static inline pte_t pte_wrprotect(pte_t pte)
303{ 320{
304 pte.pte_low &= ~_PAGE_WRITE; 321 pte.pte_low &= ~_PAGE_WRITE;
322 if (!config_enabled(CONFIG_XPA))
323 pte.pte_low &= ~_PAGE_SILENT_WRITE;
305 pte.pte_high &= ~_PAGE_SILENT_WRITE; 324 pte.pte_high &= ~_PAGE_SILENT_WRITE;
306 return pte; 325 return pte;
307} 326}
@@ -309,6 +328,8 @@ static inline pte_t pte_wrprotect(pte_t pte)
309static inline pte_t pte_mkclean(pte_t pte) 328static inline pte_t pte_mkclean(pte_t pte)
310{ 329{
311 pte.pte_low &= ~_PAGE_MODIFIED; 330 pte.pte_low &= ~_PAGE_MODIFIED;
331 if (!config_enabled(CONFIG_XPA))
332 pte.pte_low &= ~_PAGE_SILENT_WRITE;
312 pte.pte_high &= ~_PAGE_SILENT_WRITE; 333 pte.pte_high &= ~_PAGE_SILENT_WRITE;
313 return pte; 334 return pte;
314} 335}
@@ -316,6 +337,8 @@ static inline pte_t pte_mkclean(pte_t pte)
316static inline pte_t pte_mkold(pte_t pte) 337static inline pte_t pte_mkold(pte_t pte)
317{ 338{
318 pte.pte_low &= ~_PAGE_ACCESSED; 339 pte.pte_low &= ~_PAGE_ACCESSED;
340 if (!config_enabled(CONFIG_XPA))
341 pte.pte_low &= ~_PAGE_SILENT_READ;
319 pte.pte_high &= ~_PAGE_SILENT_READ; 342 pte.pte_high &= ~_PAGE_SILENT_READ;
320 return pte; 343 return pte;
321} 344}
@@ -323,24 +346,33 @@ static inline pte_t pte_mkold(pte_t pte)
323static inline pte_t pte_mkwrite(pte_t pte) 346static inline pte_t pte_mkwrite(pte_t pte)
324{ 347{
325 pte.pte_low |= _PAGE_WRITE; 348 pte.pte_low |= _PAGE_WRITE;
326 if (pte.pte_low & _PAGE_MODIFIED) 349 if (pte.pte_low & _PAGE_MODIFIED) {
350 if (!config_enabled(CONFIG_XPA))
351 pte.pte_low |= _PAGE_SILENT_WRITE;
327 pte.pte_high |= _PAGE_SILENT_WRITE; 352 pte.pte_high |= _PAGE_SILENT_WRITE;
353 }
328 return pte; 354 return pte;
329} 355}
330 356
331static inline pte_t pte_mkdirty(pte_t pte) 357static inline pte_t pte_mkdirty(pte_t pte)
332{ 358{
333 pte.pte_low |= _PAGE_MODIFIED; 359 pte.pte_low |= _PAGE_MODIFIED;
334 if (pte.pte_low & _PAGE_WRITE) 360 if (pte.pte_low & _PAGE_WRITE) {
361 if (!config_enabled(CONFIG_XPA))
362 pte.pte_low |= _PAGE_SILENT_WRITE;
335 pte.pte_high |= _PAGE_SILENT_WRITE; 363 pte.pte_high |= _PAGE_SILENT_WRITE;
364 }
336 return pte; 365 return pte;
337} 366}
338 367
339static inline pte_t pte_mkyoung(pte_t pte) 368static inline pte_t pte_mkyoung(pte_t pte)
340{ 369{
341 pte.pte_low |= _PAGE_ACCESSED; 370 pte.pte_low |= _PAGE_ACCESSED;
342 if (!(pte.pte_low & _PAGE_NO_READ)) 371 if (!(pte.pte_low & _PAGE_NO_READ)) {
372 if (!config_enabled(CONFIG_XPA))
373 pte.pte_low |= _PAGE_SILENT_READ;
343 pte.pte_high |= _PAGE_SILENT_READ; 374 pte.pte_high |= _PAGE_SILENT_READ;
375 }
344 return pte; 376 return pte;
345} 377}
346#else 378#else
@@ -438,7 +470,7 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
438 */ 470 */
439#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 471#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
440 472
441#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 473#if defined(CONFIG_XPA)
442static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 474static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
443{ 475{
444 pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); 476 pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
@@ -447,6 +479,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
447 pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; 479 pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
448 return pte; 480 return pte;
449} 481}
482#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
483static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
484{
485 pte.pte_low &= _PAGE_CHG_MASK;
486 pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
487 pte.pte_low |= pgprot_val(newprot);
488 pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
489 return pte;
490}
450#else 491#else
451static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 492static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
452{ 493{
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6c6a843b0d17..134c988bc61f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
98 idx += in_interrupt() ? FIX_N_COLOURS : 0; 98 idx += in_interrupt() ? FIX_N_COLOURS : 0;
99 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 99 vaddr = __fix_to_virt(FIX_CMAP_END - idx);
100 pte = mk_pte(page, prot); 100 pte = mk_pte(page, prot);
101#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 101#if defined(CONFIG_XPA)
102 entrylo = pte_to_entrylo(pte.pte_high); 102 entrylo = pte_to_entrylo(pte.pte_high);
103#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
104 entrylo = pte.pte_high;
103#else 105#else
104 entrylo = pte_to_entrylo(pte_val(pte)); 106 entrylo = pte_to_entrylo(pte_val(pte));
105#endif 107#endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 67966f30c522..db4adf9cc65c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1011,25 +1011,21 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1011 1011
1012static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) 1012static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1013{ 1013{
1014 /* 1014 if (config_enabled(CONFIG_XPA)) {
1015 * 64bit address support (36bit on a 32bit CPU) in a 32bit
1016 * Kernel is a special case. Only a few CPUs use it.
1017 */
1018 if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
1019 int pte_off_even = sizeof(pte_t) / 2; 1015 int pte_off_even = sizeof(pte_t) / 2;
1020 int pte_off_odd = pte_off_even + sizeof(pte_t); 1016 int pte_off_odd = pte_off_even + sizeof(pte_t);
1021#ifdef CONFIG_XPA
1022 const int scratch = 1; /* Our extra working register */ 1017 const int scratch = 1; /* Our extra working register */
1023 1018
1024 uasm_i_addu(p, scratch, 0, ptep); 1019 uasm_i_addu(p, scratch, 0, ptep);
1025#endif 1020
1026 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ 1021 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1027 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
1028 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); 1022 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1029 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1030 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); 1023 UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1024
1025 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
1026 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1031 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); 1027 UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
1032#ifdef CONFIG_XPA 1028
1033 uasm_i_lw(p, tmp, 0, scratch); 1029 uasm_i_lw(p, tmp, 0, scratch);
1034 uasm_i_lw(p, ptep, sizeof(pte_t), scratch); 1030 uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
1035 uasm_i_lui(p, scratch, 0xff); 1031 uasm_i_lui(p, scratch, 0xff);
@@ -1038,7 +1034,22 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1038 uasm_i_and(p, ptep, scratch, ptep); 1034 uasm_i_and(p, ptep, scratch, ptep);
1039 uasm_i_mthc0(p, tmp, C0_ENTRYLO0); 1035 uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1040 uasm_i_mthc0(p, ptep, C0_ENTRYLO1); 1036 uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
1041#endif 1037 return;
1038 }
1039
1040 /*
1041 * 64bit address support (36bit on a 32bit CPU) in a 32bit
1042 * Kernel is a special case. Only a few CPUs use it.
1043 */
1044 if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
1045 int pte_off_even = sizeof(pte_t) / 2;
1046 int pte_off_odd = pte_off_even + sizeof(pte_t);
1047
1048 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1049 UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1050
1051 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
1052 UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
1042 return; 1053 return;
1043 } 1054 }
1044 1055
@@ -1637,7 +1648,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1637#ifdef CONFIG_PHYS_ADDR_T_64BIT 1648#ifdef CONFIG_PHYS_ADDR_T_64BIT
1638 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1649 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1639 1650
1640 if (!cpu_has_64bits) { 1651 if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
1641 const int scratch = 1; /* Our extra working register */ 1652 const int scratch = 1; /* Our extra working register */
1642 1653
1643 uasm_i_lui(p, scratch, (mode >> 16)); 1654 uasm_i_lui(p, scratch, (mode >> 16));