commit    ada47b5fe13d89735805b566185f4885f5a3f750
tree      644b88f8a71896307d71438e9b3af49126ffb22b
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5
author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400

    Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'arch/mips/mm')

 arch/mips/mm/c-octeon.c    |  12
 arch/mips/mm/cache.c       |  59
 arch/mips/mm/cerr-sb1.c    |   7
 arch/mips/mm/dma-default.c |   1
 arch/mips/mm/fault.c       |  27
 arch/mips/mm/highmem.c     |   1
 arch/mips/mm/hugetlbpage.c |   2
 arch/mips/mm/init.c        |  11
 arch/mips/mm/ioremap.c     |   1
 arch/mips/mm/page.c        |   2
 arch/mips/mm/pgtable-64.c  |  44
 arch/mips/mm/tlb-r4k.c     |  84
 arch/mips/mm/tlbex.c       | 366
 arch/mips/mm/uasm.c        |  47
 arch/mips/mm/uasm.h        | 184 (deleted)
 15 files changed, 449 insertions(+), 399 deletions(-)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 94e05e5733c1..0f9c488044d1 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
  * Probe Octeon's caches
  *
  */
-static void __devinit probe_octeon(void)
+static void __cpuinit probe_octeon(void)
 {
 	unsigned long icache_size;
 	unsigned long dcache_size;
@@ -183,6 +183,7 @@ static void __devinit probe_octeon(void)
 
 	switch (c->cputype) {
 	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
 		config1 = read_c0_config1();
 		c->icache.linesz = 2 << ((config1 >> 19) & 7);
 		c->icache.sets = 64 << ((config1 >> 22) & 7);
@@ -192,10 +193,10 @@ static void __devinit probe_octeon(void)
 			c->icache.sets * c->icache.ways * c->icache.linesz;
 		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
 		c->dcache.linesz = 128;
-		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
-			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
-		else
+		if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
 			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+		else
+			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
 		c->dcache.ways = 64;
 		dcache_size =
 			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
@@ -235,7 +236,7 @@ static void __devinit probe_octeon(void)
  * Setup the Octeon cache flush routines
  *
  */
-void __devinit octeon_cache_init(void)
+void __cpuinit octeon_cache_init(void)
 {
 	extern unsigned long ebase;
 	extern char except_vec2_octeon;
@@ -305,4 +306,3 @@ asmlinkage void cache_parity_error_octeon_non_recoverable(void)
 {
 	cache_parity_error_octeon(1);
 }
-
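
Note on the Config1 decode in the hunk above: per the MIPS32 PRA, the I-cache
line size is 2 << IL (Config1 bits 21:19) and the set count per way is
64 << IS (bits 24:22); the ways field (bits 18:16) is decoded a few lines
further down in this file. A self-contained sketch of the arithmetic, using a
made-up Config1 value (illustrative, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical Config1 with IL=5, IS=3, IA=3 */
		unsigned config1 = (5u << 19) | (3u << 22) | (3u << 16);
		unsigned linesz = 2u << ((config1 >> 19) & 7);	/* 64 bytes */
		unsigned sets = 64u << ((config1 >> 22) & 7);	/* 512 */
		unsigned ways = 1 + ((config1 >> 16) & 7);	/* 4 */
		printf("icache: %u bytes\n", linesz * sets * ways); /* 131072 */
		return 0;
	}
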
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 694d51f523d1..12af739048fa 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,29 +133,50 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 }
 
 unsigned long _page_cachable_default;
-EXPORT_SYMBOL_GPL(_page_cachable_default);
+EXPORT_SYMBOL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
-	protection_map[4] = PAGE_READONLY;
-	protection_map[5] = PAGE_READONLY;
-	protection_map[6] = PAGE_COPY;
-	protection_map[7] = PAGE_COPY;
-	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
-	protection_map[12] = PAGE_READONLY;
-	protection_map[13] = PAGE_READONLY;
-	protection_map[14] = PAGE_SHARED;
-	protection_map[15] = PAGE_SHARED;
+	if (kernel_uses_smartmips_rixi) {
+		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+	} else {
+		protection_map[0] = PAGE_NONE;
+		protection_map[1] = PAGE_READONLY;
+		protection_map[2] = PAGE_COPY;
+		protection_map[3] = PAGE_COPY;
+		protection_map[4] = PAGE_READONLY;
+		protection_map[5] = PAGE_READONLY;
+		protection_map[6] = PAGE_COPY;
+		protection_map[7] = PAGE_COPY;
+		protection_map[8] = PAGE_NONE;
+		protection_map[9] = PAGE_READONLY;
+		protection_map[10] = PAGE_SHARED;
+		protection_map[11] = PAGE_SHARED;
+		protection_map[12] = PAGE_READONLY;
+		protection_map[13] = PAGE_READONLY;
+		protection_map[14] = PAGE_SHARED;
+		protection_map[15] = PAGE_SHARED;
+	}
 }
 
-void __devinit cpu_cache_init(void)
+void __cpuinit cpu_cache_init(void)
 {
 	if (cpu_has_3k_cache) {
 		extern void __weak r3k_cache_init(void);
@@ -194,7 +215,7 @@ void __devinit cpu_cache_init(void)
 
 int __weak __uncached_access(struct file *file, unsigned long addr)
 {
-	if (file->f_flags & O_SYNC)
+	if (file->f_flags & O_DSYNC)
 		return 1;
 
 	return addr >= __pa(high_memory);
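
Note: protection_map[] is indexed by the low four vm_flags bits (VM_READ,
VM_WRITE, VM_EXEC, VM_SHARED), so on SmartMIPS RI/XI-capable CPUs the new
table can express "writable but not readable" or "readable but not
executable" exactly, where the old one had to approximate with
PAGE_READONLY/PAGE_COPY. Illustrative index arithmetic (simplified flag
values, not kernel code):

	#define VM_READ   0x1
	#define VM_WRITE  0x2
	#define VM_EXEC   0x4
	#define VM_SHARED 0x8

	/* A shared write-only mapping selects entry 10, which the RIXI
	 * branch above builds as _page_cachable_default | _PAGE_PRESENT |
	 * _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ. */
	enum { prot_index = VM_SHARED | VM_WRITE };	/* == 10 */
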
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 1bd1f18ac23c..3571090ba178 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -567,13 +567,10 @@ static uint32_t extract_dc(unsigned short addr, int data)
 		datalo = ((unsigned long long)datalohi << 32) | datalolo;
 		ecc = dc_ecc(datalo);
 		if (ecc != datahi) {
-			int bits = 0;
+			int bits;
 			bad_ecc |= 1 << (3-offset);
 			ecc ^= datahi;
-			while (ecc) {
-				if (ecc & 1) bits++;
-				ecc >>= 1;
-			}
+			bits = hweight8(ecc);
 			res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
 		}
 		printk("  %02X-%016llX", datahi, datalo);
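
Note: the removed loop was an open-coded population count over the 8-bit ECC
syndrome; hweight8() computes the same value. A quick equivalence check
(userspace sketch; popcount_loop is a stand-in for the old code, not a kernel
function):

	#include <assert.h>
	#include <stdint.h>

	/* open-coded popcount, as the old cerr-sb1.c loop did it */
	static int popcount_loop(uint8_t ecc)
	{
		int bits = 0;
		while (ecc) {
			if (ecc & 1)
				bits++;
			ecc >>= 1;
		}
		return bits;
	}

	int main(void)
	{
		for (unsigned v = 0; v < 256; v++)
			assert(popcount_loop(v) == __builtin_popcount(v));
		return 0;
	}
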
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 9367e33fbd18..9547bc0cf188 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2fb2c0..b78f7d913ca4 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-			goto bad_area;
+		if (kernel_uses_smartmips_rixi) {
+			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+			if (!(vma->vm_flags & VM_READ)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+		} else {
+			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+				goto bad_area;
+		}
 	}
 
 	/*
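
Note: in the new branch above, address == regs->cp0_epc identifies an
instruction fetch (the faulting address is the PC itself), which a mapping
without VM_EXEC turns into an execute-inhibit fault; any other access to a
mapping without VM_READ is a read-inhibit fault. A condensed model of that
decision tree (simplified flag values; not the kernel's code):

	#define VM_READ  0x1UL
	#define VM_WRITE 0x2UL
	#define VM_EXEC  0x4UL

	/* returns 0 when the fault must go to bad_area */
	static int rixi_fault_ok(unsigned long address, unsigned long epc,
				 unsigned long vm_flags)
	{
		if (address == epc && !(vm_flags & VM_EXEC))
			return 0;	/* XI violation */
		if (!(vm_flags & VM_READ))
			return 0;	/* RI violation */
		return 1;		/* let handle_mm_fault() fix it up */
	}
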
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 8c2834f5919d..a7fee0dfb7a9 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -16,7 +16,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sysctl.h>
 #include <asm/mman.h>
@@ -97,4 +96,3 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
 	return page;
 }
-
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8d1f4f363049..2efcbd24c82f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -28,6 +28,7 @@
 #include <linux/proc_fs.h>
 #include <linux/pfn.h>
 #include <linux/hardirq.h>
+#include <linux/gfp.h>
 
 #include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
@@ -143,7 +144,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 	entrylo = pte.pte_high;
 #else
-	entrylo = pte_val(pte) >> 6;
+	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
 
 	ENTER_CRITICAL(flags);
@@ -298,7 +299,7 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-static int __init page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
 {
 	int i;
 
@@ -424,7 +425,7 @@ void __init mem_init(void)
 	       reservedpages << (PAGE_SHIFT-10),
 	       datasize >> 10,
 	       initsize >> 10,
-	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+	       totalhigh_pages << (PAGE_SHIFT-10));
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
@@ -462,7 +463,9 @@ void __init_refok free_initmem(void)
 			__pa_symbol(&__init_end));
 }
 
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 unsigned long pgd_current[NR_CPUS];
+#endif
 /*
  * On 64-bit we've got three-level pagetables with a slightly
  * different layout ...
@@ -475,7 +478,7 @@ unsigned long pgd_current[NR_CPUS];
  * will officially be retired.
  */
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
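
Note: pgd_current[] goes away under CONFIG_MIPS_PGD_C0_CONTEXT because the
active pgd is then kept, shifted left by 11, in bits [23..63] of the CP0
CONTEXT register; the tlbex.c handlers below recover it with a dins/ori/drotr
sequence that simultaneously plants the xkphys-cached prefix. A C model of
that recovery with a hypothetical pgd physical address (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t rotr64(uint64_t v, unsigned n)
	{
		return (v >> n) | (v << (64 - n));
	}

	int main(void)
	{
		uint64_t pgd_phys = 0x1234000;		/* hypothetical */
		uint64_t context = pgd_phys << 11;	/* as stored by the kernel */
		/* dins clears bits [0..22]; ori 0x540 seeds bits 6/8/10;
		 * drotr 11 rotates them up into the 0b10101 xkphys prefix */
		uint64_t pgd_ptr = rotr64((context & ~0x7fffffULL) | 0x540, 11);
		printf("%#llx\n", (unsigned long long)pgd_ptr);
		/* prints 0xa800000001234000 */
		return 0;
	}
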
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 0c43248347bd..cacfd31e8ec9 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -10,6 +10,7 @@
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index f5c73754d664..36272f7d3744 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -35,7 +35,7 @@
 #include <asm/sibyte/sb1250_dma.h>
 #endif
 
-#include "uasm.h"
+#include <asm/uasm.h>
 
 /* Registers used in the assembled routines. */
 #define ZERO 0
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1121019fa456..78eaa4f0b0ec 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -15,23 +15,31 @@
 void pgd_init(unsigned long page)
 {
 	unsigned long *p, *end;
+	unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+	entry = (unsigned long)invalid_pte_table;
+#else
+	entry = (unsigned long)invalid_pmd_table;
+#endif
 
 	p = (unsigned long *) page;
 	end = p + PTRS_PER_PGD;
 
 	while (p < end) {
-		p[0] = (unsigned long) invalid_pmd_table;
-		p[1] = (unsigned long) invalid_pmd_table;
-		p[2] = (unsigned long) invalid_pmd_table;
-		p[3] = (unsigned long) invalid_pmd_table;
-		p[4] = (unsigned long) invalid_pmd_table;
-		p[5] = (unsigned long) invalid_pmd_table;
-		p[6] = (unsigned long) invalid_pmd_table;
-		p[7] = (unsigned long) invalid_pmd_table;
+		p[0] = entry;
+		p[1] = entry;
+		p[2] = entry;
+		p[3] = entry;
+		p[4] = entry;
+		p[5] = entry;
+		p[6] = entry;
+		p[7] = entry;
 		p += 8;
 	}
 }
 
+#ifndef __PAGETABLE_PMD_FOLDED
 void pmd_init(unsigned long addr, unsigned long pagetable)
 {
 	unsigned long *p, *end;
@@ -40,17 +48,18 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 	end = p + PTRS_PER_PMD;
 
 	while (p < end) {
-		p[0] = (unsigned long)pagetable;
-		p[1] = (unsigned long)pagetable;
-		p[2] = (unsigned long)pagetable;
-		p[3] = (unsigned long)pagetable;
-		p[4] = (unsigned long)pagetable;
-		p[5] = (unsigned long)pagetable;
-		p[6] = (unsigned long)pagetable;
-		p[7] = (unsigned long)pagetable;
+		p[0] = pagetable;
+		p[1] = pagetable;
+		p[2] = pagetable;
+		p[3] = pagetable;
+		p[4] = pagetable;
+		p[5] = pagetable;
+		p[6] = pagetable;
+		p[7] = pagetable;
 		p += 8;
 	}
 }
+#endif
 
 void __init pagetable_init(void)
 {
@@ -59,8 +68,9 @@ void __init pagetable_init(void)
 
 	/* Initialize the entire pgd. */
 	pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-
+#endif
 	pgd_base = swapper_pg_dir;
 	/*
 	 * Fixed mappings:
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d73428b18b0a..c618eed933a1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		unsigned long lo;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
-		lo = pte_val(*ptep) >> 6;
+		lo = pte_to_entrylo(pte_val(*ptep));
 		write_c0_entrylo0(lo);
 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		ptep++;
 		write_c0_entrylo1(ptep->pte_high);
 #else
-		write_c0_entrylo0(pte_val(*ptep++) >> 6);
-		write_c0_entrylo1(pte_val(*ptep) >> 6);
+		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
 #endif
 		mtc0_tlbw_hazard();
 		if (idx < 0)
@@ -337,40 +337,6 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	EXIT_CRITICAL(flags);
 }
 
-#if 0
-static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
-				       unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned int asid;
-	pgd_t *pgdp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	int idx;
-
-	ENTER_CRITICAL(flags);
-	address &= (PAGE_MASK << 1);
-	asid = read_c0_entryhi() & ASID_MASK;
-	write_c0_entryhi(address | asid);
-	pgdp = pgd_offset(vma->vm_mm, address);
-	mtc0_tlbw_hazard();
-	tlb_probe();
-	tlb_probe_hazard();
-	pmdp = pmd_offset(pgdp, address);
-	idx = read_c0_index();
-	ptep = pte_offset_map(pmdp, address);
-	write_c0_entrylo0(pte_val(*ptep++) >> 6);
-	write_c0_entrylo1(pte_val(*ptep) >> 6);
-	mtc0_tlbw_hazard();
-	if (idx < 0)
-		tlb_write_random();
-	else
-		tlb_write_indexed();
-	tlbw_use_hazard();
-	EXIT_CRITICAL(flags);
-}
-#endif
-
 void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
 {
@@ -447,34 +413,6 @@ out:
 	return ret;
 }
 
-static void __cpuinit probe_tlb(unsigned long config)
-{
-	struct cpuinfo_mips *c = &current_cpu_data;
-	unsigned int reg;
-
-	/*
-	 * If this isn't a MIPS32 / MIPS64 compliant CPU.  Config 1 register
-	 * is not supported, we assume R4k style.  Cpu probing already figured
-	 * out the number of tlb entries.
-	 */
-	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
-		return;
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * If TLB is shared in SMTC system, total size already
-	 * has been calculated and written into cpu_data tlbsize
-	 */
-	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
-		return;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-	reg = read_c0_config1();
-	if (!((config >> 7) & 3))
-		panic("No TLB present");
-
-	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
-}
-
 static int __cpuinitdata ntlb;
 static int __init set_ntlb(char *str)
 {
@@ -486,8 +424,6 @@ __setup("ntlb=", set_ntlb);
 
 void __cpuinit tlb_init(void)
 {
-	unsigned int config = read_c0_config();
-
 	/*
 	 * You should never change this register:
 	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
@@ -495,13 +431,25 @@ void __cpuinit tlb_init(void)
 	 *   - The entire mm handling assumes the c0_pagemask register to
 	 *     be set to fixed-size pages.
 	 */
-	probe_tlb(config);
 	write_c0_pagemask(PM_DEFAULT_MASK);
 	write_c0_wired(0);
 	if (current_cpu_type() == CPU_R10000 ||
 	    current_cpu_type() == CPU_R12000 ||
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * Enable the no read, no exec bits, and enable large virtual
+		 * address.
+		 */
+		u32 pg = PG_RIE | PG_XIE;
+#ifdef CONFIG_64BIT
+		pg |= PG_ELPA;
+#endif
+		write_c0_pagegrain(pg);
+	}
+
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
 	/* From this point on the ARC firmware is dead.	 */
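
Note: the old pte_val(pte) >> 6 worked because the software PTE bits sat
immediately below the hardware EntryLo fields. With RI/XI the inhibit bits
live near the top of the PTE, so pte_to_entrylo() becomes a shift followed by
a rotate -- the same SRL/ROTR pair tlbex.c below emits into the generated
handlers. A minimal C model with symbolic (config-dependent) bit positions:

	#include <stdint.h>

	static uint64_t rotr64(uint64_t v, unsigned n)
	{
		return (v >> n) | (v << (64 - n));
	}

	/* no_exec_shift stands in for ilog2(_PAGE_NO_EXEC) and global_shift
	 * for ilog2(_PAGE_GLOBAL); the real values depend on the config. */
	static uint64_t pte_to_entrylo_model(uint64_t pte,
					     unsigned no_exec_shift,
					     unsigned global_shift)
	{
		/* drop the software-only bits, then rotate so _PAGE_GLOBAL
		 * lands at EntryLo bit 0 while NO_EXEC/NO_READ wrap around
		 * to the RI/XI positions at the top of the register */
		return rotr64(pte >> no_exec_shift,
			      global_shift - no_exec_shift);
	}
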
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index bb1719a55d22..86f004dc8355 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,8 +29,17 @@
 
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/uasm.h>
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
 
-#include "uasm.h"
 
 static inline int r45k_bvahwbug(void)
 {
@@ -73,18 +82,18 @@ static int __cpuinit m4kc_tlbp_war(void)
 enum label_id {
 	label_second_part = 1,
 	label_leave,
-#ifdef MODULE_START
-	label_module_alloc,
-#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+	label_large_segbits_fault,
 #ifdef CONFIG_HUGETLB_PAGE
 	label_tlb_huge_update,
 #endif
@@ -92,18 +101,18 @@ enum label_id {
 
 UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
-#ifdef MODULE_START
-UASM_L_LA(_module_alloc)
-#endif
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
 #ifdef CONFIG_HUGETLB_PAGE
 UASM_L_LA(_tlb_huge_update)
 #endif
@@ -160,6 +169,16 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+/*
+ * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
+ * we cannot do r3000 under these circumstances.
+ */
+
 /*
  * The R3000 TLB handler is simple.
  */
@@ -199,6 +218,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
 
 	dump_handler((u32 *)ebase, 32);
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
  * The R4000 TLB handler is much more complicated.  We have two
@@ -396,36 +416,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+								  unsigned int reg)
 {
-	/* Set huge page tlb entry size */
-	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+	}
+}
 
-	build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
 	/* Reset default page size */
 	if (PM_DEFAULT_MASK >> 16) {
 		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else if (PM_DEFAULT_MASK) {
 		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else {
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 	}
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
@@ -459,15 +503,15 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, pte);
+	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 	else
 		UASM_i_ADDU(p, pte, pte, tmp);
 
-	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -497,30 +541,56 @@ static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
-
+#endif
 	/*
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-	uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+	if (check_for_high_segbits) {
+		/*
+		 * The kernel currently implicitely assumes that the
+		 * MIPS SEGBITS parameter for the processor is
+		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+		 * allocate virtual addresses outside the maximum
+		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+		 * that doesn't prevent user code from accessing the
+		 * higher xuseg addresses.  Here, we make sure that
+		 * everything but the lower xuseg addresses goes down
+		 * the module_alloc/vmalloc path.
+		 */
+		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, ptr, label_vmalloc);
+	} else {
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+	/*
+	 * &pgd << 11 stored in CONTEXT [23..63].
+	 */
+	UASM_i_MFC0(p, ptr, C0_CONTEXT);
+	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
+	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0  1 0 1  << 6  xkphys cached */
+	uasm_i_drotr(p, ptr, ptr, 11);
+#elif defined(CONFIG_SMP)
 # ifdef  CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
 	uasm_i_mfc0(p, ptr, C0_TCBIND);
-	uasm_i_dsrl(p, ptr, ptr, 19);
+	uasm_i_dsrl_safe(p, ptr, ptr, 19);
 # else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-	uasm_i_dsrl(p, ptr, ptr, 23);
-#endif
+	uasm_i_dsrl_safe(p, ptr, ptr, 23);
+# endif
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_daddu(p, ptr, ptr, tmp);
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
@@ -532,42 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc_done(l, *p);
 
-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
 }
 
+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
 	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
 	uasm_l_vmalloc(l, *p);
 
-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+		 *
+		 * Ignoring xsseg (assume disabled so would generate
+		 * (address errors?), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+		 * addresses would have taken an address error. We try
+		 * to mimic that here by taking a load/istream page
+		 * fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
 	}
 }
 
@@ -674,35 +780,53 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		if (kernel_uses_smartmips_rixi) {
+			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		} else {
+			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		}
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 
 		/* The pte entries are pre-shifted */
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-	if (r45k_bvahwbug())
-		uasm_i_mfc0(p, tmp, C0_INDEX);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		if (r45k_bvahwbug())
+			uasm_i_mfc0(p, tmp, C0_INDEX);
+	}
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
@@ -731,10 +855,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	 * create the plain linear handler
 	 */
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -763,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif
 
 	/*
@@ -802,8 +931,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	} else {
 #if defined(CONFIG_HUGETLB_PAGE)
 		const enum label_id ls = label_tlb_huge_update;
-#elif defined(MODULE_START)
-		const enum label_id ls = label_module_alloc;
 #else
 		const enum label_id ls = label_vmalloc;
 #endif
@@ -875,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -977,9 +1095,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_il_bnez(p, r, pte, lid);
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+		uasm_il_beqz(p, r, pte, lid);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, pte, lid);
+	}
 	iPTE_LW(p, pte, ptr);
 }
 
@@ -1033,6 +1156,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 	iPTE_LW(p, pte, ptr);
 }
 
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 /*
  * R3000 style TLB load/store/modify handlers.
 */
@@ -1184,6 +1308,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 
 	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
 * R4000 style TLB load/store/modify handlers.
@@ -1235,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1250,10 +1375,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -1262,6 +1392,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine  entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround1(&l, p);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1274,6 +1432,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine  entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+
+		/*
+		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
+		 * it is restored in build_huge_tlb_write_entry.
+		 */
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround2(&l, p);
+	}
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif
@@ -1392,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
 	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000:
@@ -1400,6 +1596,7 @@ void __cpuinit build_tlb_refill_handler(void)
 	case CPU_TX3912:
 	case CPU_TX3922:
 	case CPU_TX3927:
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 		build_r3000_tlb_refill_handler();
 		if (!run_once) {
 			build_r3000_tlb_load_handler();
@@ -1407,6 +1604,9 @@ void __cpuinit build_tlb_refill_handler(void)
 			build_r3000_tlb_modify_handler();
 			run_once++;
 		}
+#else
+		panic("No R3000 TLB refill handler");
+#endif
 		break;
 
 	case CPU_R6000:
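
Note: check_for_high_segbits (set at the end of this diff) compares the CPU's
implemented virtual-address bits against what the page tables can actually
map; addresses in between would otherwise index past the pgd, so the refill
path routes them to tlb_do_page_fault_0 instead. Back-of-the-envelope for one
hypothetical 64-bit configuration (4 KB pages, 512-entry tables, PGD_ORDER 0;
real values are config-dependent):

	#include <stdio.h>

	int main(void)
	{
		int page_shift = 12;			/* 4 KB pages */
		int bits_per_level = page_shift - 3;	/* 512 entries/table */
		int pgdir_shift = page_shift + 2 * bits_per_level; /* 30 */
		int pgd_order = 0;
		int covered = pgdir_shift + pgd_order + page_shift - 3; /* 39 */
		int vmbits = 40;	/* hypothetical cpu_data.vmbits */
		printf("covered=%d vmbits=%d high_segbits=%d\n",
		       covered, vmbits, vmbits > covered);
		return 0;
	}
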
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index f467199676a8..611d564fdcf1 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -19,8 +19,7 @@
19#include <asm/inst.h> 19#include <asm/inst.h>
20#include <asm/elf.h> 20#include <asm/elf.h>
21#include <asm/bugs.h> 21#include <asm/bugs.h>
22 22#include <asm/uasm.h>
23#include "uasm.h"
24 23
25enum fields { 24enum fields {
26 RS = 0x001, 25 RS = 0x001,
@@ -32,7 +31,8 @@ enum fields {
32 BIMM = 0x040, 31 BIMM = 0x040,
33 JIMM = 0x080, 32 JIMM = 0x080,
34 FUNC = 0x100, 33 FUNC = 0x100,
35 SET = 0x200 34 SET = 0x200,
35 SCIMM = 0x400
36}; 36};
37 37
38#define OP_MASK 0x3f 38#define OP_MASK 0x3f
@@ -53,6 +53,8 @@ enum fields {
53#define FUNC_SH 0 53#define FUNC_SH 0
54#define SET_MASK 0x7 54#define SET_MASK 0x7
55#define SET_SH 0 55#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
56 58
57enum opcode { 59enum opcode {
58 insn_invalid, 60 insn_invalid,
@@ -60,11 +62,12 @@ enum opcode {
60 insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, 62 insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
61 insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0, 63 insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
62 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, 64 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
63 insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, 65 insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
64 insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, 66 insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
65 insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, 67 insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
66 insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, 68 insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
67 insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori 69 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
70 insn_dins, insn_syscall
68}; 71};
69 72
70struct insn { 73struct insn {
@@ -104,6 +107,7 @@ static struct insn insn_table[] __cpuinitdata = {
104 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, 107 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
105 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, 108 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
106 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, 109 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
110 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
107 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, 111 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
108 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, 112 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
109 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 113 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
@@ -116,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
116 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 120 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
117 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 121 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
118 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 122 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
123 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
119 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 124 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
120 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 125 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
121 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 126 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -125,13 +130,17 @@ static struct insn insn_table[] __cpuinitdata = {
125 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 130 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
126 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 131 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
127 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, 132 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
133 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
128 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, 134 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
129 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 135 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
130 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, 136 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
137 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
131 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, 138 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
132 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, 139 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
133 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 140 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
134 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 141 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
142 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
143 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
135 { insn_invalid, 0, 0 } 144 { insn_invalid, 0, 0 }
136}; 145};
137 146
@@ -204,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
204 return (arg >> 2) & JIMM_MASK; 213 return (arg >> 2) & JIMM_MASK;
205} 214}
206 215
216static inline __cpuinit u32 build_scimm(u32 arg)
217{
218 if (arg & ~SCIMM_MASK)
219 printk(KERN_WARNING "Micro-assembler field overflow\n");
220
221 return (arg & SCIMM_MASK) << SCIMM_SH;
222}
223
207static inline __cpuinit u32 build_func(u32 arg) 224static inline __cpuinit u32 build_func(u32 arg)
208{ 225{
209 if (arg & ~FUNC_MASK) 226 if (arg & ~FUNC_MASK)
@@ -262,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
262 op |= build_func(va_arg(ap, u32)); 279 op |= build_func(va_arg(ap, u32));
263 if (ip->fields & SET) 280 if (ip->fields & SET)
264 op |= build_set(va_arg(ap, u32)); 281 op |= build_set(va_arg(ap, u32));
282 if (ip->fields & SCIMM)
283 op |= build_scimm(va_arg(ap, u32));
265 va_end(ap); 284 va_end(ap);
266 285
267 **buf = op; 286 **buf = op;
@@ -304,6 +323,12 @@ Ip_u2u1s3(op) \
304 build_insn(buf, insn##op, b, a, c); \ 323 build_insn(buf, insn##op, b, a, c); \
305} 324}
306 325
326#define I_u2u1msbu3(op) \
327Ip_u2u1msbu3(op) \
328{ \
329 build_insn(buf, insn##op, b, a, c+d-1, c); \
330}
331
307#define I_u1u2(op) \ 332#define I_u1u2(op) \
308Ip_u1u2(op) \ 333Ip_u1u2(op) \
309{ \ 334{ \
@@ -349,6 +374,7 @@ I_u2u1u3(_dsll32)
 I_u2u1u3(_dsra)
 I_u2u1u3(_dsrl)
 I_u2u1u3(_dsrl32)
+I_u2u1u3(_drotr)
 I_u3u1u2(_dsubu)
 I_0(_eret)
 I_u1(_j)
@@ -362,6 +388,7 @@ I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
+I_u3u1u2(_or)
 I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
@@ -370,13 +397,17 @@ I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u2u1u3(_sra)
 I_u2u1u3(_srl)
+I_u2u1u3(_rotr)
 I_u3u1u2(_subu)
 I_u2s3u1(_sw)
 I_0(_tlbp)
+I_0(_tlbr)
 I_0(_tlbwi)
 I_0(_tlbwr)
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
+I_u2u1msbu3(_dins);
+I_u1(_syscall);
 
 /* Handle labels. */
 void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
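
Taken together, the uasm.c changes preserve the micro-assembler's three-layer shape: a template table, one generic field-packing builder, and thin per-op wrappers. The following self-contained miniature shows how those layers compose for the newly added OR and ROTR; the names and pre-packed constants are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>

enum opcode { insn_or, insn_rotr };	/* indexes into the table below */

static const struct {
	enum opcode op;
	uint32_t match;		/* pre-packed template, as in insn_table */
} table[] = {
	{ insn_or,   0x00000025 },	/* spec_op | or_op */
	{ insn_rotr, 0x00200002 },	/* spec_op, rs = 1, srl_op */
};

/* Generic builder: OR the operand fields into the template. */
static void emit(uint32_t **buf, enum opcode op,
		 unsigned rs, unsigned rt, unsigned rd, unsigned sa)
{
	*(*buf)++ = table[op].match |
		    (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6);
}

int main(void)
{
	uint32_t text[2], *p = text;

	emit(&p, insn_or,   2, 3, 4, 0);	/* or   $4, $2, $3 */
	emit(&p, insn_rotr, 0, 5, 6, 7);	/* rotr $6, $5, 7  */
	for (uint32_t *q = text; q < p; q++)
		printf("0x%08x\n", (unsigned)*q);
	return 0;
}

Advancing the caller's buffer pointer inside emit() mirrors how every uasm_i_*() helper leaves *buf pointing at the next free slot, so handlers can be built as a straight sequence of calls.
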
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
deleted file mode 100644
index c6d1e3dd82d4..000000000000
--- a/arch/mips/mm/uasm.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005 Maciej W. Rozycki
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#include <linux/types.h>
-
-#define Ip_u1u2u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u2u1u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u3u1u2(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u1u2s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u2s3u1(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
-
-#define Ip_u2u1s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u1u2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
-
-#define Ip_u1s2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
-
-#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
-
-#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
-
-Ip_u2u1s3(_addiu);
-Ip_u3u1u2(_addu);
-Ip_u2u1u3(_andi);
-Ip_u3u1u2(_and);
-Ip_u1u2s3(_beq);
-Ip_u1u2s3(_beql);
-Ip_u1s2(_bgez);
-Ip_u1s2(_bgezl);
-Ip_u1s2(_bltz);
-Ip_u1s2(_bltzl);
-Ip_u1u2s3(_bne);
-Ip_u2s3u1(_cache);
-Ip_u1u2u3(_dmfc0);
-Ip_u1u2u3(_dmtc0);
-Ip_u2u1s3(_daddiu);
-Ip_u3u1u2(_daddu);
-Ip_u2u1u3(_dsll);
-Ip_u2u1u3(_dsll32);
-Ip_u2u1u3(_dsra);
-Ip_u2u1u3(_dsrl);
-Ip_u2u1u3(_dsrl32);
-Ip_u3u1u2(_dsubu);
-Ip_0(_eret);
-Ip_u1(_j);
-Ip_u1(_jal);
-Ip_u1(_jr);
-Ip_u2s3u1(_ld);
-Ip_u2s3u1(_ll);
-Ip_u2s3u1(_lld);
-Ip_u1s2(_lui);
-Ip_u2s3u1(_lw);
-Ip_u1u2u3(_mfc0);
-Ip_u1u2u3(_mtc0);
-Ip_u2u1u3(_ori);
-Ip_u2s3u1(_pref);
-Ip_0(_rfe);
-Ip_u2s3u1(_sc);
-Ip_u2s3u1(_scd);
-Ip_u2s3u1(_sd);
-Ip_u2u1u3(_sll);
-Ip_u2u1u3(_sra);
-Ip_u2u1u3(_srl);
-Ip_u3u1u2(_subu);
-Ip_u2s3u1(_sw);
-Ip_0(_tlbp);
-Ip_0(_tlbwi);
-Ip_0(_tlbwr);
-Ip_u3u1u2(_xor);
-Ip_u2u1u3(_xori);
-
-/* Handle labels. */
-struct uasm_label {
-	u32 *addr;
-	int lab;
-};
-
-void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
-#ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
-#endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
-
-#define UASM_L_LA(lb) \
-static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
-{ \
-	uasm_build_label(lab, addr, label##lb); \
-}
-
-/* convenience macros for instructions */
-#ifdef CONFIG_64BIT
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
-#else
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
-#endif
-
-#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
-#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
-#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
-#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
-#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
-#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
-#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
-#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
-#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
-
-/* Handle relocations. */
-struct uasm_reloc {
-	u32 *addr;
-	unsigned int type;
-	int lab;
-};
-
-/* This is zero so we can use zeroed label arrays. */
-#define UASM_LABEL_INVALID 0
-
-void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
-	u32 *first, u32 *end, u32 *target);
-int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
-
-/* Convenience functions for labeled branches. */
-void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
-	unsigned int reg2, int lid);
-void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
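
That is the whole of the removed header: the Ip_*() prototype generators, the label and relocation records, and the width-agnostic UASM_i_* wrappers that let one handler source serve 32-bit and 64-bit kernels. The hunk records only the file's removal from arch/mips/mm; the declarations themselves are not retired. The two-pass label/relocation scheme is the part worth spelling out: a branch to a label is emitted with a zero offset plus a fixup record, and a resolve pass patches the PC-relative offsets once label addresses are known. Below is a self-contained miniature of that scheme; the two structures mirror the header, while everything else, including dropping the reloc type field, is an illustrative stand-in for the uasm.c internals.

#include <stdio.h>
#include <stdint.h>

#define LABEL_INVALID 0		/* zero, so zeroed arrays terminate lists */

struct label { uint32_t *addr; int lab; };
struct reloc { uint32_t *addr; int lab; };	/* kernel adds a type field */

static void build_label(struct label **l, uint32_t *addr, int lid)
{
	(*l)->addr = addr;
	(*l)->lab = lid;
	(*l)++;
}

/* Emit a branch with a zero offset and record a fixup against lid. */
static void il_b(uint32_t **p, struct reloc **r, int lid)
{
	(*r)->addr = *p;
	(*r)->lab = lid;
	(*r)++;
	*(*p)++ = 0x10000000;	/* beq $0, $0, 0  ==  "b ." */
}

/* Second pass: patch each recorded branch with its PC16 offset. */
static void resolve(struct reloc *rel, struct label *lab)
{
	for (; rel->lab != LABEL_INVALID; rel++)
		for (struct label *l = lab; l->lab != LABEL_INVALID; l++)
			if (l->lab == rel->lab) {
				/* offset in words, counted from the delay slot */
				long off = l->addr - (rel->addr + 1);
				*rel->addr |= (uint32_t)(off & 0xffff);
			}
}

int main(void)
{
	uint32_t text[8], *p = text;
	struct label labels[4] = { { 0 } }, *l = labels;
	struct reloc relocs[4] = { { 0 } }, *r = relocs;
	enum { label_exit = 1 };	/* label ids must be non-zero */

	il_b(&p, &r, label_exit);	/* forward branch to "exit" */
	*p++ = 0;			/* delay-slot nop */
	*p++ = 0;			/* some straight-line work */
	build_label(&l, p, label_exit);	/* exit: */

	resolve(relocs, labels);
	printf("patched branch: 0x%08x\n", (unsigned)text[0]);	/* 0x10000002 */
	return 0;
}

UASM_LABEL_INVALID being zero is what lets zero-initialized label and reloc arrays double as their own terminators, exactly as the header's comment notes.
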