aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-10 10:48:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-10 10:48:05 -0400
commitdaf799cca8abbf7f3e253ecf1d41d244070773d7 (patch)
tree6fb27ff60b820ae0eeb906c8a5d8d7f93f89cd8b /arch/mips/mm
parent6019958d146a4f127dae727a930f902c92531e6e (diff)
parentb22d1b6a91ca4260f869e349179ae53f18c664db (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle: - More work on DT support for various platforms - Various fixes that were to late to make it straight into 3.9 - Improved platform support, in particular the Netlogic XLR and BCM63xx, and the SEAD3 and Malta eval boards. - Support for several Ralink SOC families. - Complete support for the microMIPS ASE which basically reencodes the existing MIPS32/MIPS64 ISA to use non-constant size instructions. - Some fallout from LTO work which remove old cruft and will generally make the MIPS kernel easier to maintain and resistant to compiler optimization, even in absence of LTO. - KVM support. While MIPS has announced hardware virtualization extensions this KVM extension uses trap and emulate mode for virtualization of MIPS32. More KVM work to add support for VZ hardware virtualizaiton extensions and MIPS64 will probably already be merged for 3.11. Most of this has been sitting in -next for a long time. All defconfigs have been build or run time tested except three for which fixes are being sent by other maintainers. Semantic conflict with kvm updates done as per Ralf * 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (118 commits) MIPS: Add new GIC clockevent driver. MIPS: Formatting clean-ups for clocksources. MIPS: Refactor GIC clocksource code. MIPS: Move 'gic_frequency' to common location. MIPS: Move 'gic_present' to common location. MIPS: MIPS16e: Add unaligned access support. MIPS: MIPS16e: Support handling of delay slots. MIPS: MIPS16e: Add instruction formats. MIPS: microMIPS: Optimise 'strnlen' core library function. MIPS: microMIPS: Optimise 'strlen' core library function. MIPS: microMIPS: Optimise 'strncpy' core library function. MIPS: microMIPS: Optimise 'memset' core library function. MIPS: microMIPS: Add configuration option for microMIPS kernel. MIPS: microMIPS: Disable LL/SC and fix linker bug. MIPS: microMIPS: Add vdso support. MIPS: microMIPS: Add unaligned access support. 
MIPS: microMIPS: Support handling of delay slots. MIPS: microMIPS: Add support for exception handling. MIPS: microMIPS: Floating point support. MIPS: microMIPS: Fix macro naming in micro-assembler. ...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--arch/mips/mm/Makefile4
-rw-r--r--arch/mips/mm/c-r4k.c30
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/dma-default.c25
-rw-r--r--arch/mips/mm/page.c10
-rw-r--r--arch/mips/mm/tlb-r3k.c20
-rw-r--r--arch/mips/mm/tlb-r4k.c4
-rw-r--r--arch/mips/mm/tlb-r8k.c2
-rw-r--r--arch/mips/mm/tlbex.c132
-rw-r--r--arch/mips/mm/uasm-micromips.c221
-rw-r--r--arch/mips/mm/uasm-mips.c205
-rw-r--r--arch/mips/mm/uasm.c326
12 files changed, 686 insertions, 294 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1dcec30ad1c4..e87aae1f2e80 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y += cache.o dma-default.o extable.o fault.o \ 5obj-y += cache.o dma-default.o extable.o fault.o \
6 gup.o init.o mmap.o page.o page-funcs.o \ 6 gup.o init.o mmap.o page.o page-funcs.o \
7 tlbex.o tlbex-fault.o uasm.o 7 tlbex.o tlbex-fault.o uasm-mips.o
8 8
9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
10obj-$(CONFIG_64BIT) += pgtable-64.o 10obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o 22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o 23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o 24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
25
26obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915eacb9..21813beec7a5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
33#include <asm/war.h> 33#include <asm/war.h>
34#include <asm/cacheflush.h> /* for run_uncached() */ 34#include <asm/cacheflush.h> /* for run_uncached() */
35#include <asm/traps.h> 35#include <asm/traps.h>
36#include <asm/dma-coherence.h>
36 37
37/* 38/*
38 * Special Variant of smp_call_function for use by cache functions: 39 * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 137 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
137} 138}
138 139
139static void (* r4k_blast_dcache)(void); 140void (* r4k_blast_dcache)(void);
141EXPORT_SYMBOL(r4k_blast_dcache);
140 142
141static void __cpuinit r4k_blast_dcache_setup(void) 143static void __cpuinit r4k_blast_dcache_setup(void)
142{ 144{
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed; 266 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
265} 267}
266 268
267static void (* r4k_blast_icache)(void); 269void (* r4k_blast_icache)(void);
270EXPORT_SYMBOL(r4k_blast_icache);
268 271
269static void __cpuinit r4k_blast_icache_setup(void) 272static void __cpuinit r4k_blast_icache_setup(void)
270{ 273{
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
1377 } 1380 }
1378} 1381}
1379 1382
1380#if defined(CONFIG_DMA_NONCOHERENT)
1381
1382static int __cpuinitdata coherentio;
1383
1384static int __init setcoherentio(char *str)
1385{
1386 coherentio = 1;
1387
1388 return 0;
1389}
1390
1391early_param("coherentio", setcoherentio);
1392#endif
1393
1394static void __cpuinit r4k_cache_error_setup(void) 1383static void __cpuinit r4k_cache_error_setup(void)
1395{ 1384{
1396 extern char __weak except_vec2_generic; 1385 extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
1472 1461
1473 build_clear_page(); 1462 build_clear_page();
1474 build_copy_page(); 1463 build_copy_page();
1475#if !defined(CONFIG_MIPS_CMP) 1464
1465 /*
1466 * We want to run CMP kernels on core with and without coherent
1467 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1468 * or not to flush caches.
1469 */
1476 local_r4k___flush_cache_all(NULL); 1470 local_r4k___flush_cache_all(NULL);
1477#endif 1471
1478 coherency_setup(); 1472 coherency_setup();
1479 board_cache_error_setup = r4k_cache_error_setup; 1473 board_cache_error_setup = r4k_cache_error_setup;
1480} 1474}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
48 48
49EXPORT_SYMBOL_GPL(local_flush_data_cache_page); 49EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
50EXPORT_SYMBOL(flush_data_cache_page); 50EXPORT_SYMBOL(flush_data_cache_page);
51EXPORT_SYMBOL(flush_icache_all);
51 52
52#ifdef CONFIG_DMA_NONCOHERENT 53#ifdef CONFIG_DMA_NONCOHERENT
53 54
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f9ef83829a52..caf92ecb37d6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,6 +22,26 @@
22 22
23#include <dma-coherence.h> 23#include <dma-coherence.h>
24 24
25int coherentio = 0; /* User defined DMA coherency from command line. */
26EXPORT_SYMBOL_GPL(coherentio);
27int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
28
29static int __init setcoherentio(char *str)
30{
31 coherentio = 1;
32 pr_info("Hardware DMA cache coherency (command line)\n");
33 return 0;
34}
35early_param("coherentio", setcoherentio);
36
37static int __init setnocoherentio(char *str)
38{
39 coherentio = 0;
40 pr_info("Software DMA cache coherency (command line)\n");
41 return 0;
42}
43early_param("nocoherentio", setnocoherentio);
44
25static inline struct page *dma_addr_to_page(struct device *dev, 45static inline struct page *dma_addr_to_page(struct device *dev,
26 dma_addr_t dma_addr) 46 dma_addr_t dma_addr)
27{ 47{
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
115 135
116 if (!plat_device_is_coherent(dev)) { 136 if (!plat_device_is_coherent(dev)) {
117 dma_cache_wback_inv((unsigned long) ret, size); 137 dma_cache_wback_inv((unsigned long) ret, size);
118 ret = UNCAC_ADDR(ret); 138 if (!hw_coherentio)
139 ret = UNCAC_ADDR(ret);
119 } 140 }
120 } 141 }
121 142
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
142 163
143 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); 164 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
144 165
145 if (!plat_device_is_coherent(dev)) 166 if (!plat_device_is_coherent(dev) && !hw_coherentio)
146 addr = CAC_ADDR(addr); 167 addr = CAC_ADDR(addr);
147 168
148 free_pages(addr, get_order(size)); 169 free_pages(addr, get_order(size));
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index a29fba55b53e..4eb8dcfaf1ce 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
247 struct uasm_label *l = labels; 247 struct uasm_label *l = labels;
248 struct uasm_reloc *r = relocs; 248 struct uasm_reloc *r = relocs;
249 int i; 249 int i;
250 static atomic_t run_once = ATOMIC_INIT(0);
251
252 if (atomic_xchg(&run_once, 1)) {
253 return;
254 }
250 255
251 memset(labels, 0, sizeof(labels)); 256 memset(labels, 0, sizeof(labels));
252 memset(relocs, 0, sizeof(relocs)); 257 memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
389 struct uasm_label *l = labels; 394 struct uasm_label *l = labels;
390 struct uasm_reloc *r = relocs; 395 struct uasm_reloc *r = relocs;
391 int i; 396 int i;
397 static atomic_t run_once = ATOMIC_INIT(0);
398
399 if (atomic_xchg(&run_once, 1)) {
400 return;
401 }
392 402
393 memset(labels, 0, sizeof(labels)); 403 memset(labels, 0, sizeof(labels));
394 memset(relocs, 0, sizeof(relocs)); 404 memset(relocs, 0, sizeof(relocs));
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..4a13c150f31b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
51#endif 51#endif
52 52
53 local_irq_save(flags); 53 local_irq_save(flags);
54 old_ctx = read_c0_entryhi() & ASID_MASK; 54 old_ctx = ASID_MASK(read_c0_entryhi());
55 write_c0_entrylo0(0); 55 write_c0_entrylo0(0);
56 entry = r3k_have_wired_reg ? read_c0_wired() : 8; 56 entry = r3k_have_wired_reg ? read_c0_wired() : 8;
57 for (; entry < current_cpu_data.tlbsize; entry++) { 57 for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 87
88#ifdef DEBUG_TLB 88#ifdef DEBUG_TLB
89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
90 cpu_context(cpu, mm) & ASID_MASK, start, end); 90 ASID_MASK(cpu_context(cpu, mm)), start, end);
91#endif 91#endif
92 local_irq_save(flags); 92 local_irq_save(flags);
93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
94 if (size <= current_cpu_data.tlbsize) { 94 if (size <= current_cpu_data.tlbsize) {
95 int oldpid = read_c0_entryhi() & ASID_MASK; 95 int oldpid = ASID_MASK(read_c0_entryhi());
96 int newpid = cpu_context(cpu, mm) & ASID_MASK; 96 int newpid = ASID_MASK(cpu_context(cpu, mm));
97 97
98 start &= PAGE_MASK; 98 start &= PAGE_MASK;
99 end += PAGE_SIZE - 1; 99 end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
166#ifdef DEBUG_TLB 166#ifdef DEBUG_TLB
167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
168#endif 168#endif
169 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; 169 newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
170 page &= PAGE_MASK; 170 page &= PAGE_MASK;
171 local_irq_save(flags); 171 local_irq_save(flags);
172 oldpid = read_c0_entryhi() & ASID_MASK; 172 oldpid = ASID_MASK(read_c0_entryhi());
173 write_c0_entryhi(page | newpid); 173 write_c0_entryhi(page | newpid);
174 BARRIER; 174 BARRIER;
175 tlb_probe(); 175 tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
197 if (current->active_mm != vma->vm_mm) 197 if (current->active_mm != vma->vm_mm)
198 return; 198 return;
199 199
200 pid = read_c0_entryhi() & ASID_MASK; 200 pid = ASID_MASK(read_c0_entryhi());
201 201
202#ifdef DEBUG_TLB 202#ifdef DEBUG_TLB
203 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { 203 if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
205 (cpu_context(cpu, vma->vm_mm)), pid); 205 (cpu_context(cpu, vma->vm_mm)), pid);
206 } 206 }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
241 241
242 local_irq_save(flags); 242 local_irq_save(flags);
243 /* Save old context and create impossible VPN2 value */ 243 /* Save old context and create impossible VPN2 value */
244 old_ctx = read_c0_entryhi() & ASID_MASK; 244 old_ctx = ASID_MASK(read_c0_entryhi());
245 old_pagemask = read_c0_pagemask(); 245 old_pagemask = read_c0_pagemask();
246 w = read_c0_wired(); 246 w = read_c0_wired();
247 write_c0_wired(w + 1); 247 write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
264#endif 264#endif
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 old_ctx = read_c0_entryhi() & ASID_MASK; 267 old_ctx = ASID_MASK(read_c0_entryhi());
268 write_c0_entrylo0(entrylo0); 268 write_c0_entrylo0(entrylo0);
269 write_c0_entryhi(entryhi); 269 write_c0_entryhi(entryhi);
270 write_c0_index(wired); 270 write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c81a29..09653b290d53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/hugetlb.h> 15#include <linux/hugetlb.h>
16#include <linux/module.h>
16 17
17#include <asm/cpu.h> 18#include <asm/cpu.h>
18#include <asm/bootinfo.h> 19#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
94 FLUSH_ITLB; 95 FLUSH_ITLB;
95 EXIT_CRITICAL(flags); 96 EXIT_CRITICAL(flags);
96} 97}
98EXPORT_SYMBOL(local_flush_tlb_all);
97 99
98/* All entries common to a mm share an asid. To effectively flush 100/* All entries common to a mm share an asid. To effectively flush
99 these entries, we just bump the asid. */ 101 these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
285 287
286 ENTER_CRITICAL(flags); 288 ENTER_CRITICAL(flags);
287 289
288 pid = read_c0_entryhi() & ASID_MASK; 290 pid = ASID_MASK(read_c0_entryhi());
289 address &= (PAGE_MASK << 1); 291 address &= (PAGE_MASK << 1);
290 write_c0_entryhi(address | pid); 292 write_c0_entryhi(address | pid);
291 pgdp = pgd_offset(vma->vm_mm, address); 293 pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..122f9207f49e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
195 if (current->active_mm != vma->vm_mm) 195 if (current->active_mm != vma->vm_mm)
196 return; 196 return;
197 197
198 pid = read_c0_entryhi() & ASID_MASK; 198 pid = ASID_MASK(read_c0_entryhi());
199 199
200 local_irq_save(flags); 200 local_irq_save(flags);
201 address &= PAGE_MASK; 201 address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 820e6612d744..4d46d3787576 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/mmu_context.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34#include <asm/war.h> 35#include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
305static int check_for_high_segbits __cpuinitdata; 306static int check_for_high_segbits __cpuinitdata;
306#endif 307#endif
307 308
309static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
310 unsigned int i_const)
311{
312 unsigned int **p;
313
314 for (p = start; p < stop; p++) {
315#ifndef CONFIG_CPU_MICROMIPS
316 unsigned int *ip;
317
318 ip = *p;
319 *ip = (*ip & 0xffff0000) | i_const;
320#else
321 unsigned short *ip;
322
323 ip = ((unsigned short *)((unsigned int)*p - 1));
324 if ((*ip & 0xf000) == 0x4000) {
325 *ip &= 0xfff1;
326 *ip |= (i_const << 1);
327 } else if ((*ip & 0xf000) == 0x6000) {
328 *ip &= 0xfff1;
329 *ip |= ((i_const >> 2) << 1);
330 } else {
331 ip++;
332 *ip = i_const;
333 }
334#endif
335 local_flush_icache_range((unsigned long)ip,
336 (unsigned long)ip + sizeof(*ip));
337 }
338}
339
340#define asid_insn_fixup(section, const) \
341do { \
342 extern unsigned int *__start_ ## section; \
343 extern unsigned int *__stop_ ## section; \
344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
345} while(0)
346
347/*
348 * Caller is assumed to flush the caches before the first context switch.
349 */
350static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
351 unsigned int version_mask,
352 unsigned int first_version)
353{
354 extern asmlinkage void handle_ri_rdhwr_vivt(void);
355 unsigned long *vivt_exc;
356
357#ifdef CONFIG_CPU_MICROMIPS
358 /*
359 * Worst case optimised microMIPS addiu instructions support
360 * only a 3-bit immediate value.
361 */
362 if(inc > 7)
363 panic("Invalid ASID increment value!");
364#endif
365 asid_insn_fixup(__asid_inc, inc);
366 asid_insn_fixup(__asid_mask, mask);
367 asid_insn_fixup(__asid_version_mask, version_mask);
368 asid_insn_fixup(__asid_first_version, first_version);
369
370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
372#ifdef CONFIG_CPU_MICROMIPS
373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
374#endif
375 vivt_exc++;
376 *vivt_exc = (*vivt_exc & ~mask) | mask;
377
378 current_cpu_data.asid_cache = first_version;
379}
380
308static int check_for_high_segbits __cpuinitdata; 381static int check_for_high_segbits __cpuinitdata;
309 382
310static unsigned int kscratch_used_mask __cpuinitdata; 383static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1458u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; 1531u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1459u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; 1532u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1460#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1533#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1461u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; 1534u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
1462 1535
1463static void __cpuinit build_r4000_setup_pgd(void) 1536static void __cpuinit build_r4000_setup_pgd(void)
1464{ 1537{
1465 const int a0 = 4; 1538 const int a0 = 4;
1466 const int a1 = 5; 1539 const int a1 = 5;
1467 u32 *p = tlbmiss_handler_setup_pgd; 1540 u32 *p = tlbmiss_handler_setup_pgd_array;
1468 struct uasm_label *l = labels; 1541 struct uasm_label *l = labels;
1469 struct uasm_reloc *r = relocs; 1542 struct uasm_reloc *r = relocs;
1470 1543
1471 memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); 1544 memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
1472 memset(labels, 0, sizeof(labels)); 1545 memset(labels, 0, sizeof(labels));
1473 memset(relocs, 0, sizeof(relocs)); 1546 memset(relocs, 0, sizeof(relocs));
1474 1547
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
1496 uasm_i_jr(&p, 31); 1569 uasm_i_jr(&p, 31);
1497 UASM_i_MTC0(&p, a0, 31, pgd_reg); 1570 UASM_i_MTC0(&p, a0, 31, pgd_reg);
1498 } 1571 }
1499 if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) 1572 if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
1500 panic("tlbmiss_handler_setup_pgd space exceeded"); 1573 panic("tlbmiss_handler_setup_pgd_array space exceeded");
1501 uasm_resolve_relocs(relocs, labels); 1574 uasm_resolve_relocs(relocs, labels);
1502 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1575 pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
1503 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1576 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
1504 1577
1505 dump_handler("tlbmiss_handler", 1578 dump_handler("tlbmiss_handler",
1506 tlbmiss_handler_setup_pgd, 1579 tlbmiss_handler_setup_pgd_array,
1507 ARRAY_SIZE(tlbmiss_handler_setup_pgd)); 1580 ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
1508} 1581}
1509#endif 1582#endif
1510 1583
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
2030 2103
2031 uasm_l_nopage_tlbl(&l, p); 2104 uasm_l_nopage_tlbl(&l, p);
2032 build_restore_work_registers(&p); 2105 build_restore_work_registers(&p);
2106#ifdef CONFIG_CPU_MICROMIPS
2107 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2108 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2109 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2110 uasm_i_jr(&p, K0);
2111 } else
2112#endif
2033 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 2113 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2034 uasm_i_nop(&p); 2114 uasm_i_nop(&p);
2035 2115
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
2077 2157
2078 uasm_l_nopage_tlbs(&l, p); 2158 uasm_l_nopage_tlbs(&l, p);
2079 build_restore_work_registers(&p); 2159 build_restore_work_registers(&p);
2160#ifdef CONFIG_CPU_MICROMIPS
2161 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2162 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2163 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2164 uasm_i_jr(&p, K0);
2165 } else
2166#endif
2080 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2167 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2081 uasm_i_nop(&p); 2168 uasm_i_nop(&p);
2082 2169
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
2125 2212
2126 uasm_l_nopage_tlbm(&l, p); 2213 uasm_l_nopage_tlbm(&l, p);
2127 build_restore_work_registers(&p); 2214 build_restore_work_registers(&p);
2215#ifdef CONFIG_CPU_MICROMIPS
2216 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2217 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2218 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2219 uasm_i_jr(&p, K0);
2220 } else
2221#endif
2128 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2222 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2129 uasm_i_nop(&p); 2223 uasm_i_nop(&p);
2130 2224
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
2162 case CPU_TX3922: 2256 case CPU_TX3922:
2163 case CPU_TX3927: 2257 case CPU_TX3927:
2164#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2258#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2165 build_r3000_tlb_refill_handler(); 2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
2260 if (cpu_has_local_ebase)
2261 build_r3000_tlb_refill_handler();
2166 if (!run_once) { 2262 if (!run_once) {
2263 if (!cpu_has_local_ebase)
2264 build_r3000_tlb_refill_handler();
2167 build_r3000_tlb_load_handler(); 2265 build_r3000_tlb_load_handler();
2168 build_r3000_tlb_store_handler(); 2266 build_r3000_tlb_store_handler();
2169 build_r3000_tlb_modify_handler(); 2267 build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
2184 break; 2282 break;
2185 2283
2186 default: 2284 default:
2285#ifndef CONFIG_MIPS_MT_SMTC
2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
2287#else
2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
2289#endif
2187 if (!run_once) { 2290 if (!run_once) {
2188 scratch_reg = allocate_kscratch(); 2291 scratch_reg = allocate_kscratch();
2189#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2292#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
2192 build_r4000_tlb_load_handler(); 2295 build_r4000_tlb_load_handler();
2193 build_r4000_tlb_store_handler(); 2296 build_r4000_tlb_store_handler();
2194 build_r4000_tlb_modify_handler(); 2297 build_r4000_tlb_modify_handler();
2298 if (!cpu_has_local_ebase)
2299 build_r4000_tlb_refill_handler();
2195 run_once++; 2300 run_once++;
2196 } 2301 }
2197 build_r4000_tlb_refill_handler(); 2302 if (cpu_has_local_ebase)
2303 build_r4000_tlb_refill_handler();
2198 } 2304 }
2199} 2305}
2200 2306
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
2207 local_flush_icache_range((unsigned long)handle_tlbm, 2313 local_flush_icache_range((unsigned long)handle_tlbm,
2208 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 2314 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2209#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2315#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2210 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, 2316 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
2211 (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); 2317 (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
2212#endif 2318#endif
2213} 2319}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000000..162ee6d62788
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * A small micro-assembler. It is intentionally kept simple, does only
7 * support a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_MICROMIPS
24#include <asm/uasm.h>
25
26#define RS_MASK 0x1f
27#define RS_SH 16
28#define RT_MASK 0x1f
29#define RT_SH 21
30#define SCIMM_MASK 0x3ff
31#define SCIMM_SH 16
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RT_SH \
37 | (c) << RS_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
42/* Define these when we are not the ISA the kernel is being compiled with. */
43#ifndef CONFIG_CPU_MICROMIPS
44#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table_MM[] __uasminitdata = {
53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
56 { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
57 { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_beql, 0, 0 },
59 { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
60 { insn_bgezl, 0, 0 },
61 { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
62 { insn_bltzl, 0, 0 },
63 { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
64 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
65 { insn_daddu, 0, 0 },
66 { insn_daddiu, 0, 0 },
67 { insn_dmfc0, 0, 0 },
68 { insn_dmtc0, 0, 0 },
69 { insn_dsll, 0, 0 },
70 { insn_dsll32, 0, 0 },
71 { insn_dsra, 0, 0 },
72 { insn_dsrl, 0, 0 },
73 { insn_dsrl32, 0, 0 },
74 { insn_drotr, 0, 0 },
75 { insn_drotr32, 0, 0 },
76 { insn_dsubu, 0, 0 },
77 { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
78 { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
79 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
80 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
81 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
82 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
83 { insn_ld, 0, 0 },
84 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
85 { insn_lld, 0, 0 },
86 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
87 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
88 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
89 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
90 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
91 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
92 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
93 { insn_rfe, 0, 0 },
94 { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
95 { insn_scd, 0, 0 },
96 { insn_sd, 0, 0 },
97 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
98 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
99 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
100 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
101 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
102 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
103 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
104 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
105 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
106 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
107 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
108 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
109 { insn_dins, 0, 0 },
110 { insn_dinsm, 0, 0 },
111 { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
112 { insn_bbit0, 0, 0 },
113 { insn_bbit1, 0, 0 },
114 { insn_lwx, 0, 0 },
115 { insn_ldx, 0, 0 },
116 { insn_invalid, 0, 0 }
117};
118
119#undef M
120
121static inline __uasminit u32 build_bimm(s32 arg)
122{
123 WARN(arg > 0xffff || arg < -0x10000,
124 KERN_WARNING "Micro-assembler field overflow\n");
125
126 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
127
128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
129}
130
131static inline __uasminit u32 build_jimm(u32 arg)
132{
133
134 WARN(arg & ~((JIMM_MASK << 2) | 1),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 1) & JIMM_MASK;
138}
139
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146 struct insn *ip = NULL;
147 unsigned int i;
148 va_list ap;
149 u32 op;
150
151 for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
152 if (insn_table_MM[i].opcode == opc) {
153 ip = &insn_table_MM[i];
154 break;
155 }
156
157 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158 panic("Unsupported Micro-assembler instruction %d", opc);
159
160 op = ip->match;
161 va_start(ap, opc);
162 if (ip->fields & RS) {
163 if (opc == insn_mfc0 || opc == insn_mtc0)
164 op |= build_rt(va_arg(ap, u32));
165 else
166 op |= build_rs(va_arg(ap, u32));
167 }
168 if (ip->fields & RT) {
169 if (opc == insn_mfc0 || opc == insn_mtc0)
170 op |= build_rs(va_arg(ap, u32));
171 else
172 op |= build_rt(va_arg(ap, u32));
173 }
174 if (ip->fields & RD)
175 op |= build_rd(va_arg(ap, u32));
176 if (ip->fields & RE)
177 op |= build_re(va_arg(ap, u32));
178 if (ip->fields & SIMM)
179 op |= build_simm(va_arg(ap, s32));
180 if (ip->fields & UIMM)
181 op |= build_uimm(va_arg(ap, u32));
182 if (ip->fields & BIMM)
183 op |= build_bimm(va_arg(ap, s32));
184 if (ip->fields & JIMM)
185 op |= build_jimm(va_arg(ap, u32));
186 if (ip->fields & FUNC)
187 op |= build_func(va_arg(ap, u32));
188 if (ip->fields & SET)
189 op |= build_set(va_arg(ap, u32));
190 if (ip->fields & SCIMM)
191 op |= build_scimm(va_arg(ap, u32));
192 va_end(ap);
193
194#ifdef CONFIG_CPU_LITTLE_ENDIAN
195 **buf = ((op & 0xffff) << 16) | (op >> 16);
196#else
197 **buf = op;
198#endif
199 (*buf)++;
200}
201
202static inline void __uasminit
203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
204{
205 long laddr = (long)lab->addr;
206 long raddr = (long)rel->addr;
207
208 switch (rel->type) {
209 case R_MIPS_PC16:
210#ifdef CONFIG_CPU_LITTLE_ENDIAN
211 *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
212#else
213 *rel->addr |= build_bimm(laddr - (raddr + 4));
214#endif
215 break;
216
217 default:
218 panic("Unsupported Micro-assembler relocation %d",
219 rel->type);
220 }
221}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000000..5fcdd8fe3e83
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * A small micro-assembler. It is intentionally kept simple, does only
7 * support a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_CLASSIC
24#include <asm/uasm.h>
25
/*
 * Bit widths/positions of the instruction fields that uasm-mips.c
 * encodes itself; the remaining field macros come from the shared
 * uasm.c, which is textually included below.
 */
26#define RS_MASK 0x1f
27#define RS_SH 21
28#define RT_MASK 0x1f
29#define RT_SH 16
30#define SCIMM_MASK 0xfffff
31#define SCIMM_SH 6
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RS_SH \
37 | (c) << RT_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
42/* Define these when we are not the ISA the kernel is being compiled with. */
43#ifdef CONFIG_CPU_MICROMIPS
/*
 * Synthesize the classic-ISA b/beqz/beqzl/bnez emitters from
 * beq/beql/bne with $zero operands when the kernel itself is built
 * as microMIPS.
 */
44#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table[] __uasminitdata = {
53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
56 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
57 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
59 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
60 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
61 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
62 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
63 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
64 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
65 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
68 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
69 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
70 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
71 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
72 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
73 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
74 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
75 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
76 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
77 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
78 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
79 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
80 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
81 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
82 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
83 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
84 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
87 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
88 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
89 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
90 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
92 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
93 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
95 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
96 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
97 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
98 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
99 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
100 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
101 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
102 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
103 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
106 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
107 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
108 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
109 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
110 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
111 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
112 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
113 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
114 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
115 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
116 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
117 { insn_invalid, 0, 0 }
118};
119
120#undef M
121
122static inline __uasminit u32 build_bimm(s32 arg)
123{
124 WARN(arg > 0x1ffff || arg < -0x20000,
125 KERN_WARNING "Micro-assembler field overflow\n");
126
127 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
128
129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
130}
131
132static inline __uasminit u32 build_jimm(u32 arg)
133{
134 WARN(arg & ~(JIMM_MASK << 2),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 2) & JIMM_MASK;
138}
139
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146	struct insn *ip = NULL;
147	unsigned int i;
148	va_list ap;
149	u32 op;
150
	/* Linear scan of the encoding table; insn_invalid terminates it. */
151	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
152		if (insn_table[i].opcode == opc) {
153			ip = &insn_table[i];
154			break;
155		}
156
	/*
	 * Unknown opcodes are fatal, as is daddiu on CPUs where
	 * r4k_daddiu_bug() reports the daddiu erratum.
	 */
157	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158		panic("Unsupported Micro-assembler instruction %d", opc);
159
160	op = ip->match;
161	va_start(ap, opc);
	/*
	 * NOTE: the sequence of field tests below also fixes the order in
	 * which va_arg() consumes the caller's arguments, so it must stay
	 * in the documented RS..FUNC/IMM order -- do not reorder.
	 */
162	if (ip->fields & RS)
163		op |= build_rs(va_arg(ap, u32));
164	if (ip->fields & RT)
165		op |= build_rt(va_arg(ap, u32));
166	if (ip->fields & RD)
167		op |= build_rd(va_arg(ap, u32));
168	if (ip->fields & RE)
169		op |= build_re(va_arg(ap, u32));
170	if (ip->fields & SIMM)
171		op |= build_simm(va_arg(ap, s32));
172	if (ip->fields & UIMM)
173		op |= build_uimm(va_arg(ap, u32));
174	if (ip->fields & BIMM)
175		op |= build_bimm(va_arg(ap, s32));
176	if (ip->fields & JIMM)
177		op |= build_jimm(va_arg(ap, u32));
178	if (ip->fields & FUNC)
179		op |= build_func(va_arg(ap, u32));
180	if (ip->fields & SET)
181		op |= build_set(va_arg(ap, u32));
182	if (ip->fields & SCIMM)
183		op |= build_scimm(va_arg(ap, u32));
184	va_end(ap);
185
	/* Emit the assembled word and advance the caller's buffer pointer. */
186	**buf = op;
187	(*buf)++;
188}
189
190static inline void __uasminit
191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
192{
193 long laddr = (long)lab->addr;
194 long raddr = (long)rel->addr;
195
196 switch (rel->type) {
197 case R_MIPS_PC16:
198 *rel->addr |= build_bimm(laddr - (raddr + 4));
199 break;
200
201 default:
202 panic("Unsupported Micro-assembler relocation %d",
203 rel->type);
204 }
205}
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 942ff6c2eba2..7eb5e4355d25 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -10,17 +10,9 @@
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
13 */ 14 */
14 15
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18
19#include <asm/inst.h>
20#include <asm/elf.h>
21#include <asm/bugs.h>
22#include <asm/uasm.h>
23
24enum fields { 16enum fields {
25 RS = 0x001, 17 RS = 0x001,
26 RT = 0x002, 18 RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
37 29
38#define OP_MASK 0x3f 30#define OP_MASK 0x3f
39#define OP_SH 26 31#define OP_SH 26
40#define RS_MASK 0x1f
41#define RS_SH 21
42#define RT_MASK 0x1f
43#define RT_SH 16
44#define RD_MASK 0x1f 32#define RD_MASK 0x1f
45#define RD_SH 11 33#define RD_SH 11
46#define RE_MASK 0x1f 34#define RE_MASK 0x1f
@@ -53,8 +41,6 @@ enum fields {
53#define FUNC_SH 0 41#define FUNC_SH 0
54#define SET_MASK 0x7 42#define SET_MASK 0x7
55#define SET_SH 0 43#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
58 44
59enum opcode { 45enum opcode {
60 insn_invalid, 46 insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
77 enum fields fields; 63 enum fields fields;
78}; 64};
79 65
80/* This macro sets the non-variable bits of an instruction. */
81#define M(a, b, c, d, e, f) \
82 ((a) << OP_SH \
83 | (b) << RS_SH \
84 | (c) << RT_SH \
85 | (d) << RD_SH \
86 | (e) << RE_SH \
87 | (f) << FUNC_SH)
88
89static struct insn insn_table[] __uasminitdata = {
90 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
92 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
93 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
94 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
95 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
96 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
97 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
98 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
99 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
100 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
101 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
102 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
103 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
106 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
107 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
108 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
109 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
110 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
111 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
112 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
113 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
114 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
115 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
116 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
117 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
118 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
119 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
120 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
121 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
122 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
123 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
124 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
125 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
126 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
127 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
128 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
129 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
130 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
131 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
132 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
133 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
134 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
135 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
136 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
137 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
138 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
139 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
140 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
141 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
142 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
143 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
144 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
145 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
146 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
147 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
148 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
149 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
150 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
151 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
152 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
153 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
154 { insn_invalid, 0, 0 }
155};
156
157#undef M
158
159static inline __uasminit u32 build_rs(u32 arg) 66static inline __uasminit u32 build_rs(u32 arg)
160{ 67{
161 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
199 return arg & IMM_MASK; 106 return arg & IMM_MASK;
200} 107}
201 108
202static inline __uasminit u32 build_bimm(s32 arg)
203{
204 WARN(arg > 0x1ffff || arg < -0x20000,
205 KERN_WARNING "Micro-assembler field overflow\n");
206
207 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
208
209 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
210}
211
212static inline __uasminit u32 build_jimm(u32 arg)
213{
214 WARN(arg & ~(JIMM_MASK << 2),
215 KERN_WARNING "Micro-assembler field overflow\n");
216
217 return (arg >> 2) & JIMM_MASK;
218}
219
220static inline __uasminit u32 build_scimm(u32 arg) 109static inline __uasminit u32 build_scimm(u32 arg)
221{ 110{
222 WARN(arg & ~SCIMM_MASK, 111 WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
239 return arg & SET_MASK; 128 return arg & SET_MASK;
240} 129}
241 130
242/* 131static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
243 * The order of opcode arguments is implicitly left to right,
244 * starting with RS and ending with FUNC or IMM.
245 */
246static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
247{
248 struct insn *ip = NULL;
249 unsigned int i;
250 va_list ap;
251 u32 op;
252
253 for (i = 0; insn_table[i].opcode != insn_invalid; i++)
254 if (insn_table[i].opcode == opc) {
255 ip = &insn_table[i];
256 break;
257 }
258
259 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
260 panic("Unsupported Micro-assembler instruction %d", opc);
261
262 op = ip->match;
263 va_start(ap, opc);
264 if (ip->fields & RS)
265 op |= build_rs(va_arg(ap, u32));
266 if (ip->fields & RT)
267 op |= build_rt(va_arg(ap, u32));
268 if (ip->fields & RD)
269 op |= build_rd(va_arg(ap, u32));
270 if (ip->fields & RE)
271 op |= build_re(va_arg(ap, u32));
272 if (ip->fields & SIMM)
273 op |= build_simm(va_arg(ap, s32));
274 if (ip->fields & UIMM)
275 op |= build_uimm(va_arg(ap, u32));
276 if (ip->fields & BIMM)
277 op |= build_bimm(va_arg(ap, s32));
278 if (ip->fields & JIMM)
279 op |= build_jimm(va_arg(ap, u32));
280 if (ip->fields & FUNC)
281 op |= build_func(va_arg(ap, u32));
282 if (ip->fields & SET)
283 op |= build_set(va_arg(ap, u32));
284 if (ip->fields & SCIMM)
285 op |= build_scimm(va_arg(ap, u32));
286 va_end(ap);
287
288 **buf = op;
289 (*buf)++;
290}
291 132
292#define I_u1u2u3(op) \ 133#define I_u1u2u3(op) \
293Ip_u1u2u3(op) \ 134Ip_u1u2u3(op) \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
445 286
446#ifdef CONFIG_CPU_CAVIUM_OCTEON 287#ifdef CONFIG_CPU_CAVIUM_OCTEON
447#include <asm/octeon/octeon.h> 288#include <asm/octeon/octeon.h>
448void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, 289void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
449 unsigned int c) 290 unsigned int c)
450{ 291{
451 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
457 else 298 else
458 build_insn(buf, insn_pref, c, a, b); 299 build_insn(buf, insn_pref, c, a, b);
459} 300}
460UASM_EXPORT_SYMBOL(uasm_i_pref); 301UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
461#else 302#else
462I_u2s3u1(_pref) 303I_u2s3u1(_pref)
463#endif 304#endif
464 305
465/* Handle labels. */ 306/* Handle labels. */
466void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 307void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
467{ 308{
468 (*lab)->addr = addr; 309 (*lab)->addr = addr;
469 (*lab)->lab = lid; 310 (*lab)->lab = lid;
470 (*lab)++; 311 (*lab)++;
471} 312}
472UASM_EXPORT_SYMBOL(uasm_build_label); 313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
473 314
474int __uasminit uasm_in_compat_space_p(long addr) 315int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
475{ 316{
476 /* Is this address in 32bit compat space? */ 317 /* Is this address in 32bit compat space? */
477#ifdef CONFIG_64BIT 318#ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
480 return 1; 321 return 1;
481#endif 322#endif
482} 323}
483UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); 324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
484 325
485static int __uasminit uasm_rel_highest(long val) 326static int __uasminit uasm_rel_highest(long val)
486{ 327{
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
500#endif 341#endif
501} 342}
502 343
503int __uasminit uasm_rel_hi(long val) 344int __uasminit ISAFUNC(uasm_rel_hi)(long val)
504{ 345{
505 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
506} 347}
507UASM_EXPORT_SYMBOL(uasm_rel_hi); 348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
508 349
509int __uasminit uasm_rel_lo(long val) 350int __uasminit ISAFUNC(uasm_rel_lo)(long val)
510{ 351{
511 return ((val & 0xffff) ^ 0x8000) - 0x8000; 352 return ((val & 0xffff) ^ 0x8000) - 0x8000;
512} 353}
513UASM_EXPORT_SYMBOL(uasm_rel_lo); 354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
514 355
515void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) 356void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
516{ 357{
517 if (!uasm_in_compat_space_p(addr)) { 358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
518 uasm_i_lui(buf, rs, uasm_rel_highest(addr)); 359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
519 if (uasm_rel_higher(addr)) 360 if (uasm_rel_higher(addr))
520 uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); 361 ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
521 if (uasm_rel_hi(addr)) { 362 if (ISAFUNC(uasm_rel_hi(addr))) {
522 uasm_i_dsll(buf, rs, rs, 16); 363 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
523 uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); 364 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
524 uasm_i_dsll(buf, rs, rs, 16); 365 ISAFUNC(uasm_rel_hi)(addr));
366 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
525 } else 367 } else
526 uasm_i_dsll32(buf, rs, rs, 0); 368 ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
527 } else 369 } else
528 uasm_i_lui(buf, rs, uasm_rel_hi(addr)); 370 ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
529} 371}
530UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); 372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
531 373
532void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) 374void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
533{ 375{
534 UASM_i_LA_mostly(buf, rs, addr); 376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
535 if (uasm_rel_lo(addr)) { 377 if (ISAFUNC(uasm_rel_lo(addr))) {
536 if (!uasm_in_compat_space_p(addr)) 378 if (!ISAFUNC(uasm_in_compat_space_p)(addr))
537 uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); 379 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
380 ISAFUNC(uasm_rel_lo(addr)));
538 else 381 else
539 uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); 382 ISAFUNC(uasm_i_addiu)(buf, rs, rs,
383 ISAFUNC(uasm_rel_lo(addr)));
540 } 384 }
541} 385}
542UASM_EXPORT_SYMBOL(UASM_i_LA); 386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
543 387
544/* Handle relocations. */ 388/* Handle relocations. */
545void __uasminit 389void __uasminit
546uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) 390ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
547{ 391{
548 (*rel)->addr = addr; 392 (*rel)->addr = addr;
549 (*rel)->type = R_MIPS_PC16; 393 (*rel)->type = R_MIPS_PC16;
550 (*rel)->lab = lid; 394 (*rel)->lab = lid;
551 (*rel)++; 395 (*rel)++;
552} 396}
553UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); 397UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
554 398
555static inline void __uasminit 399static inline void __uasminit
556__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 400__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
557{
558 long laddr = (long)lab->addr;
559 long raddr = (long)rel->addr;
560
561 switch (rel->type) {
562 case R_MIPS_PC16:
563 *rel->addr |= build_bimm(laddr - (raddr + 4));
564 break;
565
566 default:
567 panic("Unsupported Micro-assembler relocation %d",
568 rel->type);
569 }
570}
571 401
572void __uasminit 402void __uasminit
573uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 403ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
574{ 404{
575 struct uasm_label *l; 405 struct uasm_label *l;
576 406
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
579 if (rel->lab == l->lab) 409 if (rel->lab == l->lab)
580 __resolve_relocs(rel, l); 410 __resolve_relocs(rel, l);
581} 411}
582UASM_EXPORT_SYMBOL(uasm_resolve_relocs); 412UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
583 413
584void __uasminit 414void __uasminit
585uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 415ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
586{ 416{
587 for (; rel->lab != UASM_LABEL_INVALID; rel++) 417 for (; rel->lab != UASM_LABEL_INVALID; rel++)
588 if (rel->addr >= first && rel->addr < end) 418 if (rel->addr >= first && rel->addr < end)
589 rel->addr += off; 419 rel->addr += off;
590} 420}
591UASM_EXPORT_SYMBOL(uasm_move_relocs); 421UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
592 422
593void __uasminit 423void __uasminit
594uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) 424ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
595{ 425{
596 for (; lab->lab != UASM_LABEL_INVALID; lab++) 426 for (; lab->lab != UASM_LABEL_INVALID; lab++)
597 if (lab->addr >= first && lab->addr < end) 427 if (lab->addr >= first && lab->addr < end)
598 lab->addr += off; 428 lab->addr += off;
599} 429}
600UASM_EXPORT_SYMBOL(uasm_move_labels); 430UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
601 431
602void __uasminit 432void __uasminit
603uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 433ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
604 u32 *end, u32 *target) 434 u32 *end, u32 *target)
605{ 435{
606 long off = (long)(target - first); 436 long off = (long)(target - first);
607 437
608 memcpy(target, first, (end - first) * sizeof(u32)); 438 memcpy(target, first, (end - first) * sizeof(u32));
609 439
610 uasm_move_relocs(rel, first, end, off); 440 ISAFUNC(uasm_move_relocs(rel, first, end, off));
611 uasm_move_labels(lab, first, end, off); 441 ISAFUNC(uasm_move_labels(lab, first, end, off));
612} 442}
613UASM_EXPORT_SYMBOL(uasm_copy_handler); 443UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
614 444
615int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) 445int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
616{ 446{
617 for (; rel->lab != UASM_LABEL_INVALID; rel++) { 447 for (; rel->lab != UASM_LABEL_INVALID; rel++) {
618 if (rel->addr == addr 448 if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
623 453
624 return 0; 454 return 0;
625} 455}
626UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); 456UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
627 457
628/* Convenience functions for labeled branches. */ 458/* Convenience functions for labeled branches. */
629void __uasminit 459void __uasminit
630uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 460ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
631{ 461{
632 uasm_r_mips_pc16(r, *p, lid); 462 uasm_r_mips_pc16(r, *p, lid);
633 uasm_i_bltz(p, reg, 0); 463 ISAFUNC(uasm_i_bltz)(p, reg, 0);
634} 464}
635UASM_EXPORT_SYMBOL(uasm_il_bltz); 465UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
636 466
637void __uasminit 467void __uasminit
638uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) 468ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
639{ 469{
640 uasm_r_mips_pc16(r, *p, lid); 470 uasm_r_mips_pc16(r, *p, lid);
641 uasm_i_b(p, 0); 471 ISAFUNC(uasm_i_b)(p, 0);
642} 472}
643UASM_EXPORT_SYMBOL(uasm_il_b); 473UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
644 474
645void __uasminit 475void __uasminit
646uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 476ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
647{ 477{
648 uasm_r_mips_pc16(r, *p, lid); 478 uasm_r_mips_pc16(r, *p, lid);
649 uasm_i_beqz(p, reg, 0); 479 ISAFUNC(uasm_i_beqz)(p, reg, 0);
650} 480}
651UASM_EXPORT_SYMBOL(uasm_il_beqz); 481UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
652 482
653void __uasminit 483void __uasminit
654uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 484ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
655{ 485{
656 uasm_r_mips_pc16(r, *p, lid); 486 uasm_r_mips_pc16(r, *p, lid);
657 uasm_i_beqzl(p, reg, 0); 487 ISAFUNC(uasm_i_beqzl)(p, reg, 0);
658} 488}
659UASM_EXPORT_SYMBOL(uasm_il_beqzl); 489UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
660 490
661void __uasminit 491void __uasminit
662uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, 492ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
663 unsigned int reg2, int lid) 493 unsigned int reg2, int lid)
664{ 494{
665 uasm_r_mips_pc16(r, *p, lid); 495 uasm_r_mips_pc16(r, *p, lid);
666 uasm_i_bne(p, reg1, reg2, 0); 496 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
667} 497}
668UASM_EXPORT_SYMBOL(uasm_il_bne); 498UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
669 499
670void __uasminit 500void __uasminit
671uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 501ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
672{ 502{
673 uasm_r_mips_pc16(r, *p, lid); 503 uasm_r_mips_pc16(r, *p, lid);
674 uasm_i_bnez(p, reg, 0); 504 ISAFUNC(uasm_i_bnez)(p, reg, 0);
675} 505}
676UASM_EXPORT_SYMBOL(uasm_il_bnez); 506UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
677 507
678void __uasminit 508void __uasminit
679uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 509ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
680{ 510{
681 uasm_r_mips_pc16(r, *p, lid); 511 uasm_r_mips_pc16(r, *p, lid);
682 uasm_i_bgezl(p, reg, 0); 512 ISAFUNC(uasm_i_bgezl)(p, reg, 0);
683} 513}
684UASM_EXPORT_SYMBOL(uasm_il_bgezl); 514UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
685 515
686void __uasminit 516void __uasminit
687uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 517ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
688{ 518{
689 uasm_r_mips_pc16(r, *p, lid); 519 uasm_r_mips_pc16(r, *p, lid);
690 uasm_i_bgez(p, reg, 0); 520 ISAFUNC(uasm_i_bgez)(p, reg, 0);
691} 521}
692UASM_EXPORT_SYMBOL(uasm_il_bgez); 522UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
693 523
694void __uasminit 524void __uasminit
695uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, 525ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
696 unsigned int bit, int lid) 526 unsigned int bit, int lid)
697{ 527{
698 uasm_r_mips_pc16(r, *p, lid); 528 uasm_r_mips_pc16(r, *p, lid);
699 uasm_i_bbit0(p, reg, bit, 0); 529 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
700} 530}
701UASM_EXPORT_SYMBOL(uasm_il_bbit0); 531UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
702 532
703void __uasminit 533void __uasminit
704uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, 534ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
705 unsigned int bit, int lid) 535 unsigned int bit, int lid)
706{ 536{
707 uasm_r_mips_pc16(r, *p, lid); 537 uasm_r_mips_pc16(r, *p, lid);
708 uasm_i_bbit1(p, reg, bit, 0); 538 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
709} 539}
710UASM_EXPORT_SYMBOL(uasm_il_bbit1); 540UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));