path: root/arch/mips/mm
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-02-27 19:19:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-27 19:19:22 -0500
commit		f1dd6ad599732fc89f36fddd65a2c2cf3c63a8711 (patch)
tree		5092207128e47cba99dc0fe373fff6a36f4cb4b8 /arch/mips/mm
parent		8d37a371b6869920e6c40c495c68eabba1ef3909 (diff)
parent		e10b234b3c4e255d3300a486c4ac15b43253ac6d (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (141 commits)
  MIPS: Alchemy: defconfig updates
  MIPS: Alchemy: Fix Au1100 ethernet build failure
  MIPS: Alchemy: Repair db1500/bosporus builds
  MIPS: ARC: Cleanup unused definitions from sgialib.h
  MIPS: Cobalt: convert legacy port addresses to GT-64111 bus addresses
  MIPS: Alchemy: use 36bit addresses for PCMCIA resources.
  MIPS: Cobalt: Fix theoretical port aliasing issue
  MIPS: Use ALIGN(x, bytes) instead of __ALIGN_MASK(x, bytes - 1)
  MIPS: Crazy spinlock speed test.
  MIPS: Optimize spinlocks.
  MIPS: Alchemy: devboard PM needs to save CPLD registers.
  MIPS: PowerTV: Eliminate duplicate opcode definition macros
  MIPS: Lemote 2F: Move printks out of port_access_lock.
  MIPS: PNX833x: Convert IRQ controller locks to raw spinlocks.
  MIPS: Octeon: Replace spinlock with raw_spinlocks in dma-octeon.c.
  MIPS: Octeon: Replace rwlocks in irq_chip handlers with raw_spinlocks.
  MIPS: Octeon: Convert octeon_irq_msi_lock to raw spinlock.
  MIPS: Loongson: Remove pointless sample_lock from oprofile code.
  MIPS: SNI: Convert sni_rm200_i8259A_lock to raw spinlock.
  MIPS: i8259: Convert IRQ controller lock to raw spinlock.
  ...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/c-octeon.c		8
-rw-r--r--	arch/mips/mm/cache.c		53
-rw-r--r--	arch/mips/mm/fault.c		27
-rw-r--r--	arch/mips/mm/hugetlbpage.c	1
-rw-r--r--	arch/mips/mm/init.c		4
-rw-r--r--	arch/mips/mm/page.c		2
-rw-r--r--	arch/mips/mm/pgtable-64.c	44
-rw-r--r--	arch/mips/mm/tlb-r4k.c		84
-rw-r--r--	arch/mips/mm/tlbex.c		188
-rw-r--r--	arch/mips/mm/uasm.c		12
-rw-r--r--	arch/mips/mm/uasm.h		191
11 files changed, 271 insertions, 343 deletions
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index e06f1af760a..0f9c488044d 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -183,6 +183,7 @@ static void __cpuinit probe_octeon(void)
 
 	switch (c->cputype) {
 	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
 		config1 = read_c0_config1();
 		c->icache.linesz = 2 << ((config1 >> 19) & 7);
 		c->icache.sets = 64 << ((config1 >> 22) & 7);
@@ -192,10 +193,10 @@ static void __cpuinit probe_octeon(void)
 			c->icache.sets * c->icache.ways * c->icache.linesz;
 		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
 		c->dcache.linesz = 128;
-		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
-			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
-		else
+		if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
 			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+		else
+			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
 		c->dcache.ways = 64;
 		dcache_size =
 			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
@@ -305,4 +306,3 @@ asmlinkage void cache_parity_error_octeon_non_recoverable(void)
 {
 	cache_parity_error_octeon(1);
 }
-
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e716cafc346..be8627bc5b0 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -137,22 +137,43 @@ EXPORT_SYMBOL_GPL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
-	protection_map[4] = PAGE_READONLY;
-	protection_map[5] = PAGE_READONLY;
-	protection_map[6] = PAGE_COPY;
-	protection_map[7] = PAGE_COPY;
-	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
-	protection_map[12] = PAGE_READONLY;
-	protection_map[13] = PAGE_READONLY;
-	protection_map[14] = PAGE_SHARED;
-	protection_map[15] = PAGE_SHARED;
+	if (kernel_uses_smartmips_rixi) {
+		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+	} else {
+		protection_map[0] = PAGE_NONE;
+		protection_map[1] = PAGE_READONLY;
+		protection_map[2] = PAGE_COPY;
+		protection_map[3] = PAGE_COPY;
+		protection_map[4] = PAGE_READONLY;
+		protection_map[5] = PAGE_READONLY;
+		protection_map[6] = PAGE_COPY;
+		protection_map[7] = PAGE_COPY;
+		protection_map[8] = PAGE_NONE;
+		protection_map[9] = PAGE_READONLY;
+		protection_map[10] = PAGE_SHARED;
+		protection_map[11] = PAGE_SHARED;
+		protection_map[12] = PAGE_READONLY;
+		protection_map[13] = PAGE_READONLY;
+		protection_map[14] = PAGE_SHARED;
+		protection_map[15] = PAGE_SHARED;
+	}
 }
 
 void __cpuinit cpu_cache_init(void)
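For context, protection_map[] above is indexed by the low four vm_flags bits, so entries 0-7 describe private mappings and 8-15 shared ones; with RIXI the MIPS code can express "present but not readable/executable" directly instead of approximating it with PAGE_READONLY/PAGE_COPY. A minimal sketch of the index calculation (the VM_* values mirror linux/mm.h; the helper name is made up for illustration):

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* Illustrative only: the core mm picks a vma's page protection as
 * protection_map[vm_flags & 0xf], i.e. read/write/exec/shared form a
 * 4-bit index into the table initialised in setup_protection_map(). */
static inline unsigned int example_prot_index(unsigned long vm_flags)
{
	return vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
}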
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2fb2c..b78f7d913ca 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-			goto bad_area;
+		if (kernel_uses_smartmips_rixi) {
+			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+			if (!(vma->vm_flags & VM_READ)) {
+#if 0
+				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+					  raw_smp_processor_id(),
+					  current->comm, current->pid,
+					  field, address, write,
+					  field, regs->cp0_epc);
+#endif
+				goto bad_area;
+			}
+		} else {
+			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+				goto bad_area;
+		}
 	}
 
 	/*
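As a rough illustration of the check added above (a sketch, not the kernel code): on a RIXI kernel a non-write fault is treated as an execute-inhibit violation when the faulting address is the instruction pointer (cp0_epc) and the VMA lacks VM_EXEC, and as a read-inhibit violation when the VMA lacks VM_READ; otherwise the old VM_READ|VM_WRITE|VM_EXEC test still applies.

enum rixi_violation { RIXI_NONE, RIXI_XI, RIXI_RI };

/* Hypothetical helper mirroring the logic above; 0x4/0x1 are the usual
 * VM_EXEC/VM_READ flag bits, epc is the faulting instruction pointer. */
static enum rixi_violation classify_rixi_fault(unsigned long address,
					       unsigned long epc,
					       unsigned long vm_flags)
{
	if (address == epc && !(vm_flags & 0x4 /* VM_EXEC */))
		return RIXI_XI;	/* instruction fetch from a no-exec page */
	if (!(vm_flags & 0x1 /* VM_READ */))
		return RIXI_RI;	/* data read from a no-read page */
	return RIXI_NONE;
}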
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 8c2834f5919..cd0660c51f2 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -97,4 +97,3 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
 	return page;
 }
-
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1651942f7fe..f34c26439a3 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -143,7 +143,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 	entrylo = pte.pte_high;
 #else
-	entrylo = pte_val(pte) >> 6;
+	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
 
 	ENTER_CRITICAL(flags);
@@ -477,7 +477,7 @@ unsigned long pgd_current[NR_CPUS];
  * will officially be retired.
  */
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index f5c73754d66..36272f7d374 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -35,7 +35,7 @@
 #include <asm/sibyte/sb1250_dma.h>
 #endif
 
-#include "uasm.h"
+#include <asm/uasm.h>
 
 /* Registers used in the assembled routines. */
 #define ZERO 0
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1121019fa45..78eaa4f0b0e 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -15,23 +15,31 @@
 void pgd_init(unsigned long page)
 {
 	unsigned long *p, *end;
+	unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+	entry = (unsigned long)invalid_pte_table;
+#else
+	entry = (unsigned long)invalid_pmd_table;
+#endif
 
 	p = (unsigned long *) page;
 	end = p + PTRS_PER_PGD;
 
 	while (p < end) {
-		p[0] = (unsigned long) invalid_pmd_table;
-		p[1] = (unsigned long) invalid_pmd_table;
-		p[2] = (unsigned long) invalid_pmd_table;
-		p[3] = (unsigned long) invalid_pmd_table;
-		p[4] = (unsigned long) invalid_pmd_table;
-		p[5] = (unsigned long) invalid_pmd_table;
-		p[6] = (unsigned long) invalid_pmd_table;
-		p[7] = (unsigned long) invalid_pmd_table;
+		p[0] = entry;
+		p[1] = entry;
+		p[2] = entry;
+		p[3] = entry;
+		p[4] = entry;
+		p[5] = entry;
+		p[6] = entry;
+		p[7] = entry;
 		p += 8;
 	}
 }
 
+#ifndef __PAGETABLE_PMD_FOLDED
 void pmd_init(unsigned long addr, unsigned long pagetable)
 {
 	unsigned long *p, *end;
@@ -40,17 +48,18 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 	end = p + PTRS_PER_PMD;
 
 	while (p < end) {
-		p[0] = (unsigned long)pagetable;
-		p[1] = (unsigned long)pagetable;
-		p[2] = (unsigned long)pagetable;
-		p[3] = (unsigned long)pagetable;
-		p[4] = (unsigned long)pagetable;
-		p[5] = (unsigned long)pagetable;
-		p[6] = (unsigned long)pagetable;
-		p[7] = (unsigned long)pagetable;
+		p[0] = pagetable;
+		p[1] = pagetable;
+		p[2] = pagetable;
+		p[3] = pagetable;
+		p[4] = pagetable;
+		p[5] = pagetable;
+		p[6] = pagetable;
+		p[7] = pagetable;
 		p += 8;
 	}
 }
+#endif
 
 void __init pagetable_init(void)
 {
@@ -59,8 +68,9 @@ void __init pagetable_init(void)
 
 	/* Initialize the entire pgd. */
 	pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-
+#endif
 	pgd_base = swapper_pg_dir;
 	/*
 	 * Fixed mappings:
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d73428b18b0..c618eed933a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		unsigned long lo;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
-		lo = pte_val(*ptep) >> 6;
+		lo = pte_to_entrylo(pte_val(*ptep));
 		write_c0_entrylo0(lo);
 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		ptep++;
 		write_c0_entrylo1(ptep->pte_high);
 #else
-		write_c0_entrylo0(pte_val(*ptep++) >> 6);
-		write_c0_entrylo1(pte_val(*ptep) >> 6);
+		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
 #endif
 		mtc0_tlbw_hazard();
 		if (idx < 0)
@@ -337,40 +337,6 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	EXIT_CRITICAL(flags);
 }
 
-#if 0
-static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
-				       unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned int asid;
-	pgd_t *pgdp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	int idx;
-
-	ENTER_CRITICAL(flags);
-	address &= (PAGE_MASK << 1);
-	asid = read_c0_entryhi() & ASID_MASK;
-	write_c0_entryhi(address | asid);
-	pgdp = pgd_offset(vma->vm_mm, address);
-	mtc0_tlbw_hazard();
-	tlb_probe();
-	tlb_probe_hazard();
-	pmdp = pmd_offset(pgdp, address);
-	idx = read_c0_index();
-	ptep = pte_offset_map(pmdp, address);
-	write_c0_entrylo0(pte_val(*ptep++) >> 6);
-	write_c0_entrylo1(pte_val(*ptep) >> 6);
-	mtc0_tlbw_hazard();
-	if (idx < 0)
-		tlb_write_random();
-	else
-		tlb_write_indexed();
-	tlbw_use_hazard();
-	EXIT_CRITICAL(flags);
-}
-#endif
-
 void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
 {
@@ -447,34 +413,6 @@ out:
 	return ret;
 }
 
-static void __cpuinit probe_tlb(unsigned long config)
-{
-	struct cpuinfo_mips *c = &current_cpu_data;
-	unsigned int reg;
-
-	/*
-	 * If this isn't a MIPS32 / MIPS64 compliant CPU.  Config 1 register
-	 * is not supported, we assume R4k style.  Cpu probing already figured
-	 * out the number of tlb entries.
-	 */
-	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
-		return;
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * If TLB is shared in SMTC system, total size already
-	 * has been calculated and written into cpu_data tlbsize
-	 */
-	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
-		return;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-	reg = read_c0_config1();
-	if (!((config >> 7) & 3))
-		panic("No TLB present");
-
-	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
-}
-
 static int __cpuinitdata ntlb;
 static int __init set_ntlb(char *str)
 {
@@ -486,8 +424,6 @@ __setup("ntlb=", set_ntlb);
 
 void __cpuinit tlb_init(void)
 {
-	unsigned int config = read_c0_config();
-
 	/*
 	 * You should never change this register:
 	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
@@ -495,13 +431,25 @@ void __cpuinit tlb_init(void)
 	 *   - The entire mm handling assumes the c0_pagemask register to
 	 *     be set to fixed-size pages.
 	 */
-	probe_tlb(config);
 	write_c0_pagemask(PM_DEFAULT_MASK);
 	write_c0_wired(0);
 	if (current_cpu_type() == CPU_R10000 ||
 	    current_cpu_type() == CPU_R12000 ||
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * Enable the no read, no exec bits, and enable large virtual
+		 * address.
+		 */
+		u32 pg = PG_RIE | PG_XIE;
+#ifdef CONFIG_64BIT
+		pg |= PG_ELPA;
+#endif
+		write_c0_pagegrain(pg);
+	}
+
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
 	/* From this point on the ARC firmware is dead. */
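The pte_to_entrylo() conversion used above (and open-coded with UASM_i_SRL/UASM_i_ROTR in the tlbex.c changes below) replaces the old "pte >> 6". A sketch in C of what the shift-plus-rotate achieves, assuming the RIXI PTE layout in which _PAGE_NO_EXEC and _PAGE_NO_READ sit immediately below _PAGE_GLOBAL (the names and helper below are illustrative, not the kernel's):

/* Illustrative sketch: drop the software bits below _PAGE_NO_EXEC, then
 * rotate right so NO_EXEC/NO_READ land in the top bits of EntryLo, where
 * the hardware reads the XI/RI inhibit bits.  Without RIXI the
 * conversion is just a plain shift by the _PAGE_GLOBAL offset. */
static unsigned long sketch_pte_to_entrylo(unsigned long pte,
					   unsigned int no_exec_shift,
					   unsigned int global_shift)
{
	unsigned int bits = 8 * sizeof(pte);
	unsigned int rot = global_shift - no_exec_shift;	/* assumed > 0 */

	pte >>= no_exec_shift;
	return (pte >> rot) | (pte << (bits - rot));		/* rotate right */
}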
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index badcf5e8d69..0de0e4127d6 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,8 +29,7 @@
 
 #include <asm/mmu_context.h>
 #include <asm/war.h>
-
-#include "uasm.h"
+#include <asm/uasm.h>
 
 static inline int r45k_bvahwbug(void)
 {
@@ -77,6 +76,8 @@ enum label_id {
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
@@ -93,6 +94,8 @@ UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
@@ -397,36 +400,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+								  unsigned int reg)
 {
-	/* Set huge page tlb entry size */
-	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+	}
+}
 
-	build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
 	/* Reset default page size */
 	if (PM_DEFAULT_MASK >> 16) {
 		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else if (PM_DEFAULT_MASK) {
 		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else {
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 	}
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
@@ -460,15 +487,15 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, pte);
+	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 	else
 		UASM_i_ADDU(p, pte, pte, tmp);
 
-	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -549,11 +576,13 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
 }
 
 /*
@@ -684,35 +713,53 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		if (kernel_uses_smartmips_rixi) {
+			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		} else {
+			uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		}
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 
 		/* The pte entries are pre-shifted */
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-	if (r45k_bvahwbug())
-		uasm_i_mfc0(p, tmp, C0_INDEX);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		if (r45k_bvahwbug())
+			uasm_i_mfc0(p, tmp, C0_INDEX);
+	}
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
@@ -985,9 +1032,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_il_bnez(p, r, pte, lid);
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+		uasm_il_beqz(p, r, pte, lid);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, pte, lid);
+	}
 	iPTE_LW(p, pte, ptr);
 }
 
@@ -1272,6 +1324,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it. Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround1(&l, p);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1284,6 +1364,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it. Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+
+		/*
+		 * We clobbered C0_PAGEMASK, restore it. On the other branch
+		 * it is restored in build_huge_tlb_write_entry.
+		 */
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround2(&l, p);
+	}
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 0a165c5179a..1581e985246 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -19,8 +19,7 @@
 #include <asm/inst.h>
 #include <asm/elf.h>
 #include <asm/bugs.h>
-
-#include "uasm.h"
+#include <asm/uasm.h>
 
 enum fields {
 	RS = 0x001,
@@ -63,8 +62,9 @@ enum opcode {
 	insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
 	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
 	insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
-	insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins
+	insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
+	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_dins
 };
 
 struct insn {
@@ -126,9 +126,11 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
 	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
 	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+	{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
 	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
 	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+	{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
 	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
 	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
 	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
@@ -379,9 +381,11 @@ I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u2u1u3(_sra)
 I_u2u1u3(_srl)
+I_u2u1u3(_rotr)
 I_u3u1u2(_subu)
 I_u2s3u1(_sw)
 I_0(_tlbp)
+I_0(_tlbr)
 I_0(_tlbwi)
 I_0(_tlbwr)
 I_u3u1u2(_xor)
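The new insn_rotr table entry above relies on the MIPS32r2 encoding in which ROTR reuses the SRL opcode (SPECIAL, function 0x02) with the rs field set to 1, which is what M(spec_op, 1, 0, 0, 0, srl_op) expresses. A small illustrative encoder (a sketch, not part of uasm):

#include <stdint.h>

/* Build the 32-bit instruction word for "rotr rd, rt, sa" (MIPS32r2). */
static uint32_t encode_rotr(unsigned int rd, unsigned int rt, unsigned int sa)
{
	return (0u << 26)		/* opcode: SPECIAL */
	     | (1u << 21)		/* rs field = 1 selects rotate */
	     | ((rt & 0x1f) << 16)
	     | ((rd & 0x1f) << 11)
	     | ((sa & 0x1f) << 6)
	     | 0x02;			/* function: SRL */
}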
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
deleted file mode 100644
index 3d153edaa51..00000000000
--- a/arch/mips/mm/uasm.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005 Maciej W. Rozycki
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#include <linux/types.h>
-
-#define Ip_u1u2u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u2u1u3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u3u1u2(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
-
-#define Ip_u1u2s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u2s3u1(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
-
-#define Ip_u2u1s3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
-
-#define Ip_u2u1msbu3(op) \
-void __cpuinit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
-	   unsigned int d)
-
-#define Ip_u1u2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
-
-#define Ip_u1s2(op) \
-void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
-
-#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
-
-#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
-
-Ip_u2u1s3(_addiu);
-Ip_u3u1u2(_addu);
-Ip_u2u1u3(_andi);
-Ip_u3u1u2(_and);
-Ip_u1u2s3(_beq);
-Ip_u1u2s3(_beql);
-Ip_u1s2(_bgez);
-Ip_u1s2(_bgezl);
-Ip_u1s2(_bltz);
-Ip_u1s2(_bltzl);
-Ip_u1u2s3(_bne);
-Ip_u2s3u1(_cache);
-Ip_u1u2u3(_dmfc0);
-Ip_u1u2u3(_dmtc0);
-Ip_u2u1s3(_daddiu);
-Ip_u3u1u2(_daddu);
-Ip_u2u1u3(_dsll);
-Ip_u2u1u3(_dsll32);
-Ip_u2u1u3(_dsra);
-Ip_u2u1u3(_dsrl);
-Ip_u2u1u3(_dsrl32);
-Ip_u2u1u3(_drotr);
-Ip_u3u1u2(_dsubu);
-Ip_0(_eret);
-Ip_u1(_j);
-Ip_u1(_jal);
-Ip_u1(_jr);
-Ip_u2s3u1(_ld);
-Ip_u2s3u1(_ll);
-Ip_u2s3u1(_lld);
-Ip_u1s2(_lui);
-Ip_u2s3u1(_lw);
-Ip_u1u2u3(_mfc0);
-Ip_u1u2u3(_mtc0);
-Ip_u2u1u3(_ori);
-Ip_u2s3u1(_pref);
-Ip_0(_rfe);
-Ip_u2s3u1(_sc);
-Ip_u2s3u1(_scd);
-Ip_u2s3u1(_sd);
-Ip_u2u1u3(_sll);
-Ip_u2u1u3(_sra);
-Ip_u2u1u3(_srl);
-Ip_u3u1u2(_subu);
-Ip_u2s3u1(_sw);
-Ip_0(_tlbp);
-Ip_0(_tlbwi);
-Ip_0(_tlbwr);
-Ip_u3u1u2(_xor);
-Ip_u2u1u3(_xori);
-Ip_u2u1msbu3(_dins);
-
-/* Handle labels. */
-struct uasm_label {
-	u32 *addr;
-	int lab;
-};
-
-void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
-#ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
-#endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
-
-#define UASM_L_LA(lb) \
-static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
-{ \
-	uasm_build_label(lab, addr, label##lb); \
-}
-
-/* convenience macros for instructions */
-#ifdef CONFIG_64BIT
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
-#else
-# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
-# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
-# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
-# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
-# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
-#endif
-
-#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
-#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
-#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
-#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
-#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
-#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
-#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
-#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
-#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
-
-/* Handle relocations. */
-struct uasm_reloc {
-	u32 *addr;
-	unsigned int type;
-	int lab;
-};
-
-/* This is zero so we can use zeroed label arrays. */
-#define UASM_LABEL_INVALID 0
-
-void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
-		       u32 *first, u32 *end, u32 *target);
-int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
-
-/* Convenience functions for labeled branches. */
-void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
-		 unsigned int reg2, int lid);
-void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);