author    David Daney <ddaney@caviumnetworks.com>    2010-02-10 18:12:47 -0500
committer Ralf Baechle <ralf@linux-mips.org>    2010-02-27 06:53:26 -0500
commit    6dd9344cfc41bcc60a01cdc828cb278be7a10e01 (patch)
tree      9c62d563eba8f3acfd1c826a63e6999261b06f5a /arch
parent    32546f38fab839eee6f62b3f06c2774eade4188a (diff)
MIPS: Implement Read Inhibit/eXecute Inhibit
The SmartMIPS ASE specifies how Read Inhibit (RI) and eXecute Inhibit
(XI) bits in the page tables work.  The upper two bits of EntryLo{0,1}
are RI and XI when the feature is enabled in the PageGrain register.
SmartMIPS only covers 32-bit systems.  Cavium Octeon+ extends this to
64-bit systems by continuing to place the RI and XI bits in the top of
EntryLo even when EntryLo is 64-bits wide.

Because we need to carry the RI and XI bits in the PTE, the layout of
the PTE is changed.  There is a two instruction overhead in the TLB
refill hot path to get the EntryLo bits into the proper position.
Also the TLB load exception has to probe the TLB to check if RI or XI
caused the exception.

Also of note is that the layout of the PTE bits is done at compile
time and run time rather than statically.  In the 32-bit case this
allows for the same number of PFN bits as before the patch as
_PAGE_HUGE is not supported in 32-bit kernels (we have _PAGE_NO_EXEC
and _PAGE_NO_READ instead of _PAGE_READ and _PAGE_HUGE).

The patch is tested on Cavium Octeon+, but should also work on 32-bit
systems with the Smart-MIPS ASE.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/952/
Patchwork: http://patchwork.linux-mips.org/patch/956/
Patchwork: http://patchwork.linux-mips.org/patch/962/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
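[A minimal C sketch of the PTE-to-EntryLo conversion described above.
It is illustrative only: it mirrors the pte_to_entrylo() helper this
patch adds to pgtable-bits.h, and assumes the 64-bit RIXI layout
(RI/XI sitting directly below _PAGE_GLOBAL in the PTE).]

/* Sketch only; see the real pte_to_entrylo() added below. */
static inline uint64_t sketch_pte_to_entrylo(unsigned long pte_val)
{
	if (kernel_uses_smartmips_rixi) {
		/* Drop the software bits and rotate _PAGE_NO_READ (RI)
		 * and _PAGE_NO_EXEC (XI) into EntryLo bits 63:62; the
		 * remaining hardware bits land with G at bit 0. */
		int sa = 63 - _PAGE_NO_READ_SHIFT;
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
		       ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
	/* No RI/XI: EntryLo is the PTE with the software bits stripped. */
	return pte_val >> _PAGE_GLOBAL_SHIFT;
}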
Diffstat (limited to 'arch')
-rw-r--r--   arch/mips/include/asm/cpu-features.h     3
-rw-r--r--   arch/mips/include/asm/pgtable-32.h        4
-rw-r--r--   arch/mips/include/asm/pgtable-64.h        4
-rw-r--r--   arch/mips/include/asm/pgtable-bits.h    120
-rw-r--r--   arch/mips/include/asm/pgtable.h          26
-rw-r--r--   arch/mips/mm/cache.c                     53
-rw-r--r--   arch/mips/mm/fault.c                     27
-rw-r--r--   arch/mips/mm/init.c                       2
-rw-r--r--   arch/mips/mm/tlb-r4k.c                   19
-rw-r--r--   arch/mips/mm/tlbex.c                    169
10 files changed, 348 insertions, 79 deletions
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 272c5ef35bbb..ac73cede3a0a 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -95,6 +95,9 @@
 #ifndef cpu_has_smartmips
 #define cpu_has_smartmips	(cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
 #endif
+#ifndef kernel_uses_smartmips_rixi
+#define kernel_uses_smartmips_rixi 0
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 55813d6150c7..ae90412556d0 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -127,8 +127,8 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
 #else
-#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
-#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 24314d21a708..26dc69d792a6 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -211,8 +211,8 @@ static inline void pud_clear(pud_t *pudp)
 #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
 #else
-#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)	pgd_index(address)
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 1073e6df8621..e9fe7e97ce4c 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -50,7 +50,7 @@
 #define _CACHE_SHIFT		3
 #define _CACHE_MASK		(7<<3)
 
-#else
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 #define _PAGE_PRESENT		(1<<0)	/* implemented in software */
 #define _PAGE_READ		(1<<1)	/* implemented in software */
@@ -59,8 +59,6 @@
 #define _PAGE_MODIFIED		(1<<4)	/* implemented in software */
 #define _PAGE_FILE		(1<<4)	/* set:pagecache unset:swap */
 
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
 #define _PAGE_GLOBAL		(1<<8)
 #define _PAGE_VALID		(1<<9)
 #define _PAGE_SILENT_READ	(1<<9)	/* synonym */
@@ -69,21 +67,113 @@
 #define _CACHE_UNCACHED		(1<<11)
 #define _CACHE_MASK		(1<<11)
 
+#else /* 'Normal' r4K case */
+/*
+ * When using the RI/XI bit support, we have 13 bits of flags below
+ * the physical address. The RI/XI bits are placed such that a SRL 5
+ * can strip off the software bits, then a ROTR 2 can move the RI/XI
+ * into bits [63:62]. This also limits physical address to 56 bits,
+ * which is more than we need right now.
+ */
+
+/* implemented in software */
+#define _PAGE_PRESENT_SHIFT	(0)
+#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
+/* implemented in software, should be unused if kernel_uses_smartmips_rixi. */
+#define _PAGE_READ_SHIFT	(kernel_uses_smartmips_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ		({if (kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_READ_SHIFT; })
+/* implemented in software */
+#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
+/* implemented in software */
+#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
+/* implemented in software */
+#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
+/* set:pagecache unset:swap */
+#define _PAGE_FILE		(_PAGE_MODIFIED)
+
+#ifdef CONFIG_HUGETLB_PAGE
+/* huge tlb page */
+#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE		({BUG(); 1; })	/* Dummy value */
+#endif
 
-#define _PAGE_R4KBUG		(1<<5)	/* workaround for r4k bug */
-#define _PAGE_HUGE		(1<<5)	/* huge tlb page */
-#define _PAGE_GLOBAL		(1<<6)
-#define _PAGE_VALID		(1<<7)
-#define _PAGE_SILENT_READ	(1<<7)	/* synonym */
-#define _PAGE_DIRTY		(1<<8)	/* The MIPS dirty bit */
-#define _PAGE_SILENT_WRITE	(1<<8)
-#define _CACHE_SHIFT		9
-#define _CACHE_MASK		(7<<9)
+/* Page cannot be executed */
+#define _PAGE_NO_EXEC_SHIFT	(kernel_uses_smartmips_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT)
+#define _PAGE_NO_EXEC		({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_EXEC_SHIFT; })
+
+/* Page cannot be read */
+#define _PAGE_NO_READ_SHIFT	(kernel_uses_smartmips_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ		({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_READ_SHIFT; })
+
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
+#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
+
+#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
+/* synonym */
+#define _PAGE_SILENT_READ	(_PAGE_VALID)
+
+/* The MIPS dirty bit */
+#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
+#define _PAGE_SILENT_WRITE	(_PAGE_DIRTY)
+
+#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK		(7 << _CACHE_SHIFT)
+
+#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
-#endif
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
 
+#ifndef _PFN_SHIFT
+#define _PFN_SHIFT		PAGE_SHIFT
+#endif
+#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))
+
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ		({BUG(); 0; })
+#define _PAGE_NO_READ_SHIFT	({BUG(); 0; })
+#endif
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC		({BUG(); 0; })
+#endif
+#ifndef _PAGE_GLOBAL_SHIFT
+#define _PAGE_GLOBAL_SHIFT	ilog2(_PAGE_GLOBAL)
+#endif
+
+
+#ifndef __ASSEMBLY__
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a Mips
+ * entrylo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+	if (kernel_uses_smartmips_rixi) {
+		int sa;
+#ifdef CONFIG_32BIT
+		sa = 31 - _PAGE_NO_READ_SHIFT;
+#else
+		sa = 63 - _PAGE_NO_READ_SHIFT;
+#endif
+		/*
+		 * C has no way to express that this is a DSRL
+		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
+		 * in the fast path this is done in assembly
+		 */
+		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
+			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
+	}
+
+	return pte_val >> _PAGE_GLOBAL_SHIFT;
+}
+#endif
 
 /*
  * Cache attributes
@@ -130,9 +220,9 @@
 
 #endif
 
-#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
+#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ))
 #define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK	(_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
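[As a cross-check of the shift chain above, the following standalone
sketch (not part of the patch) recomputes the bit positions for the
64-bit RIXI case with CONFIG_HUGETLB_PAGE enabled. It shows why an SRL
by 5 strips the software bits and a ROTR by 2 then lands RI/XI in
EntryLo bits 63:62, with the G bit at bit 0.]

#include <stdio.h>

/* Standalone illustration only; re-derives the *_SHIFT values defined
 * in pgtable-bits.h above for kernel_uses_smartmips_rixi != 0 and
 * CONFIG_HUGETLB_PAGE enabled. Not kernel code. */
int main(void)
{
	int present  = 0;            /* _PAGE_PRESENT_SHIFT */
	int read     = present;      /* shares the bit; unused with RIXI */
	int write    = read + 1;     /* 1 */
	int accessed = write + 1;    /* 2 */
	int modified = accessed + 1; /* 3 */
	int huge     = modified + 1; /* 4 */
	int no_exec  = huge + 1;     /* 5: XI */
	int no_read  = no_exec + 1;  /* 6: RI */
	int global   = no_read + 1;  /* 7: first hardware bit (G) */

	/* SRL by no_exec (5) discards the software bits; ROTR by
	 * global - no_exec (2) rotates RI/XI into bits 63:62. */
	printf("srl amount  = %d\n", no_exec);
	printf("rotr amount = %d\n", global - no_exec);
	printf("XI ends up in EntryLo bit %d\n", 64 - (global - no_exec));
	printf("RI ends up in EntryLo bit %d\n", 64 - (global - no_exec) + 1);
	return 0;
}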
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 02335fda9e77..93598ba01355 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -22,23 +22,24 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \
 				 _page_cachable_default)
-#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
-				 _page_cachable_default)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \
+				 (kernel_uses_smartmips_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | \
 				 _page_cachable_default)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _page_cachable_default)
-#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 
 /*
- * MIPS can't do page protection for execute, and considers that the same like
- * read. Also, write permissions imply read permissions. This is the closest
- * we can get by reasonable means..
+ * If _PAGE_NO_EXEC is not defined, we can't do page protection for
+ * execute, and consider it to be the same as read. Also, write
+ * permissions imply read permissions. This is the closest we can get
+ * by reasonable means..
  */
 
 /*
@@ -298,8 +299,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_ACCESSED;
-	if (pte_val(pte) & _PAGE_READ)
-		pte_val(pte) |= _PAGE_SILENT_READ;
+	if (kernel_uses_smartmips_rixi) {
+		if (!(pte_val(pte) & _PAGE_NO_READ))
+			pte_val(pte) |= _PAGE_SILENT_READ;
+	} else {
+		if (pte_val(pte) & _PAGE_READ)
+			pte_val(pte) |= _PAGE_SILENT_READ;
+	}
 	return pte;
 }
 
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e716cafc346d..be8627bc5b02 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -137,22 +137,43 @@ EXPORT_SYMBOL_GPL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
-	protection_map[4] = PAGE_READONLY;
-	protection_map[5] = PAGE_READONLY;
-	protection_map[6] = PAGE_COPY;
-	protection_map[7] = PAGE_COPY;
-	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
-	protection_map[12] = PAGE_READONLY;
-	protection_map[13] = PAGE_READONLY;
-	protection_map[14] = PAGE_SHARED;
-	protection_map[15] = PAGE_SHARED;
+	if (kernel_uses_smartmips_rixi) {
+		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+	} else {
+		protection_map[0] = PAGE_NONE;
+		protection_map[1] = PAGE_READONLY;
+		protection_map[2] = PAGE_COPY;
+		protection_map[3] = PAGE_COPY;
+		protection_map[4] = PAGE_READONLY;
+		protection_map[5] = PAGE_READONLY;
+		protection_map[6] = PAGE_COPY;
+		protection_map[7] = PAGE_COPY;
+		protection_map[8] = PAGE_NONE;
+		protection_map[9] = PAGE_READONLY;
+		protection_map[10] = PAGE_SHARED;
+		protection_map[11] = PAGE_SHARED;
+		protection_map[12] = PAGE_READONLY;
+		protection_map[13] = PAGE_READONLY;
+		protection_map[14] = PAGE_SHARED;
+		protection_map[15] = PAGE_SHARED;
+	}
 }
 
 void __cpuinit cpu_cache_init(void)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2fb2c0..b78f7d913ca4 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-			goto bad_area;
+		if (kernel_uses_smartmips_rixi) {
+			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+			if (!(vma->vm_flags & VM_READ)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+		} else {
+			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+				goto bad_area;
+		}
 	}
 
 	/*
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3c5b7de10af5..f34c26439a32 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -143,7 +143,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 	entrylo = pte.pte_high;
 #else
-	entrylo = pte_val(pte) >> 6;
+	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
 
 	ENTER_CRITICAL(flags);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 21d04dfa11db..c618eed933a1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		unsigned long lo;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
-		lo = pte_val(*ptep) >> 6;
+		lo = pte_to_entrylo(pte_val(*ptep));
 		write_c0_entrylo0(lo);
 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		ptep++;
 		write_c0_entrylo1(ptep->pte_high);
 #else
-		write_c0_entrylo0(pte_val(*ptep++) >> 6);
-		write_c0_entrylo1(pte_val(*ptep) >> 6);
+		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
 #endif
 		mtc0_tlbw_hazard();
 		if (idx < 0)
@@ -437,6 +437,19 @@ void __cpuinit tlb_init(void)
 	    current_cpu_type() == CPU_R12000 ||
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * Enable the no read, no exec bits, and enable large virtual
+		 * address.
+		 */
+		u32 pg = PG_RIE | PG_XIE;
+#ifdef CONFIG_64BIT
+		pg |= PG_ELPA;
+#endif
+		write_c0_pagegrain(pg);
+	}
+
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
 	/* From this point on the ARC firmware is dead. */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4a2907c59569..0de0e4127d66 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -76,6 +76,8 @@ enum label_id {
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
@@ -92,6 +94,8 @@ UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
@@ -396,36 +400,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+								  unsigned int reg)
 {
-	/* Set huge page tlb entry size */
-	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+	}
+}
 
-	build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
 	/* Reset default page size */
 	if (PM_DEFAULT_MASK >> 16) {
 		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else if (PM_DEFAULT_MASK) {
 		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else {
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 	}
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
@@ -459,7 +487,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+	build_convert_pte_to_entrylo(p, pte);
 	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
@@ -685,9 +713,17 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
+		if (kernel_uses_smartmips_rixi) {
+			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		} else {
+			uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		}
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
@@ -704,13 +740,23 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-	if (r4k_250MHZhwbug())
-		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-	if (r45k_bvahwbug())
-		uasm_i_mfc0(p, tmp, C0_INDEX);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		if (r45k_bvahwbug())
+			uasm_i_mfc0(p, tmp, C0_INDEX);
+	}
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
@@ -986,9 +1032,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_il_bnez(p, r, pte, lid);
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+		uasm_il_beqz(p, r, pte, lid);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, pte, lid);
+	}
 	iPTE_LW(p, pte, ptr);
 }
 
@@ -1273,6 +1324,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround1(&l, p);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1285,6 +1364,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+
+		/*
+		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
+		 * it is restored in build_huge_tlb_write_entry.
+		 */
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround2(&l, p);
+	}
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif