Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/Makefile     2
-rw-r--r--   arch/mips/mm/fault.c     16
-rw-r--r--   arch/mips/mm/sc-rm7k.c  163
-rw-r--r--   arch/mips/mm/tlbex.c      5
-rw-r--r--   arch/mips/mm/uasm.c     162
5 files changed, 258 insertions(+), 90 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index f0e435599707..d679c772d082 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -34,5 +34,3 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index b78f7d913ca4..783ad0065fdf 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -16,8 +16,8 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/vt_kern.h>	/* For unblank_screen() */
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
@@ -25,13 +25,14 @@
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>	/* For VMALLOC_END */
+#include <linux/kdebug.h>
 
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
 	unsigned long address)
 {
 	struct vm_area_struct * vma = NULL;
@@ -47,6 +48,17 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	       field, regs->cp0_epc);
 #endif
 
+#ifdef CONFIG_KPROBES
+	/*
+	 * This is to notify the fault handler of the kprobes.  The
+	 * exception code is redundant as it is also carried in REGS,
+	 * but we pass it anyhow.
+	 */
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		return;
+#endif
+
 	info.si_code = SEGV_MAPERR;
 
 	/*
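
A note on the DIE_PAGE_FAULT hook added above: kprobes, or any other in-kernel client, consumes it through the generic die notifier chain. The sketch below is illustrative only; the handler name and the pr_debug() body are not part of this patch. It simply shows a notifier that inspects the struct die_args filled in by do_page_fault() and lets the fault proceed.

#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>

static int example_fault_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct die_args *args = data;

	/* Only interested in the event added by this patch. */
	if (val != DIE_PAGE_FAULT)
		return NOTIFY_DONE;

	/* args->trapnr carries the ExcCode extracted from CP0 Cause above. */
	pr_debug("page fault, epc = %lx\n", args->regs->cp0_epc);

	/* Returning NOTIFY_STOP would make do_page_fault() bail out early. */
	return NOTIFY_DONE;
}

static struct notifier_block example_fault_nb = {
	.notifier_call = example_fault_notify,
};

/* Registered during init by the client: register_die_notifier(&example_fault_nb); */
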
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index de69bfbf506e..1ef75cd80a0d 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -16,6 +16,7 @@
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 
 /* Primary cache parameters. */
@@ -25,11 +26,15 @@
 /* Secondary cache parameters. */
 #define scache_size	(256*1024)	/* Fixed to 256KiB on RM7000 */
 
+/* Tertiary cache parameters */
+#define tc_lsize	32
+
 extern unsigned long icache_way_size, dcache_way_size;
+unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 
-static int rm7k_tcache_enabled;
+static int rm7k_tcache_init;
 
 /*
  * Writeback and invalidate the primary cache dcache before DMA.
@@ -46,7 +51,7 @@ static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
 
 	blast_scache_range(addr, addr + size);
 
-	if (!rm7k_tcache_enabled)
+	if (!rm7k_tcache_init)
 		return;
 
 	a = addr & ~(tc_pagesize - 1);
@@ -70,7 +75,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
 
 	blast_inv_scache_range(addr, addr + size);
 
-	if (!rm7k_tcache_enabled)
+	if (!rm7k_tcache_init)
 		return;
 
 	a = addr & ~(tc_pagesize - 1);
@@ -83,6 +88,45 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
 	}
 }
 
+static void blast_rm7k_tcache(void)
+{
+	unsigned long start = CKSEG0ADDR(0);
+	unsigned long end = start + tcache_size;
+
+	write_c0_taglo(0);
+
+	while (start < end) {
+		cache_op(Page_Invalidate_T, start);
+		start += tc_pagesize;
+	}
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static __cpuinit void __rm7k_tc_enable(void)
+{
+	int i;
+
+	set_c0_config(RM7K_CONF_TE);
+
+	write_c0_taglo(0);
+	write_c0_taghi(0);
+
+	for (i = 0; i < tcache_size; i += tc_lsize)
+		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
+}
+
+static __cpuinit void rm7k_tc_enable(void)
+{
+	if (read_c0_config() & RM7K_CONF_TE)
+		return;
+
+	BUG_ON(tcache_size == 0);
+
+	run_uncached(__rm7k_tc_enable);
+}
+
 /*
  * This function is executed in uncached address space.
  */
@@ -95,16 +139,8 @@ static __cpuinit void __rm7k_sc_enable(void)
 	write_c0_taglo(0);
 	write_c0_taghi(0);
 
-	for (i = 0; i < scache_size; i += sc_lsize) {
-		__asm__ __volatile__ (
-		      ".set noreorder\n\t"
-		      ".set mips3\n\t"
-		      "cache %1, (%0)\n\t"
-		      ".set mips0\n\t"
-		      ".set reorder"
-		      :
-		      : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
-	}
+	for (i = 0; i < scache_size; i += sc_lsize)
+		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
 }
 
 static __cpuinit void rm7k_sc_enable(void)
@@ -112,13 +148,29 @@ static __cpuinit void rm7k_sc_enable(void)
 	if (read_c0_config() & RM7K_CONF_SE)
 		return;
 
-	printk(KERN_INFO "Enabling secondary cache...\n");
+	pr_info("Enabling secondary cache...\n");
 	run_uncached(__rm7k_sc_enable);
+
+	if (rm7k_tcache_init)
+		rm7k_tc_enable();
+}
+
+static void rm7k_tc_disable(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	blast_rm7k_tcache();
+	clear_c0_config(RM7K_CONF_TE);
+	local_irq_restore(flags);
 }
 
 static void rm7k_sc_disable(void)
 {
 	clear_c0_config(RM7K_CONF_SE);
+
+	if (rm7k_tcache_init)
+		rm7k_tc_disable();
 }
 
 static struct bcache_ops rm7k_sc_ops = {
@@ -128,6 +180,52 @@ static struct bcache_ops rm7k_sc_ops = {
 	.bc_inv = rm7k_sc_inv
 };
 
+/*
+ * This is a probing function like the one found in c-r4k.c, we look for the
+ * wrap around point with different addresses.
+ */
+static __cpuinit void __probe_tcache(void)
+{
+	unsigned long flags, addr, begin, end, pow2;
+
+	begin = (unsigned long) &_stext;
+	begin &= ~((8 * 1024 * 1024) - 1);
+	end = begin + (8 * 1024 * 1024);
+
+	local_irq_save(flags);
+
+	set_c0_config(RM7K_CONF_TE);
+
+	/* Fill size-multiple lines with a valid tag */
+	pow2 = (256 * 1024);
+	for (addr = begin; addr <= end; addr = (begin + pow2)) {
+		unsigned long *p = (unsigned long *) addr;
+		__asm__ __volatile__("nop" : : "r" (*p));
+		pow2 <<= 1;
+	}
+
+	/* Load first line with a 0 tag, to check after */
+	write_c0_taglo(0);
+	write_c0_taghi(0);
+	cache_op(Index_Store_Tag_T, begin);
+
+	/* Look for the wrap-around */
+	pow2 = (512 * 1024);
+	for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
+		cache_op(Index_Load_Tag_T, addr);
+		if (!read_c0_taglo())
+			break;
+		pow2 <<= 1;
+	}
+
+	addr -= begin;
+	tcache_size = addr;
+
+	clear_c0_config(RM7K_CONF_TE);
+
+	local_irq_restore(flags);
+}
+
 void __cpuinit rm7k_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -147,27 +245,26 @@ void __cpuinit rm7k_sc_init(void)
 	if (!(config & RM7K_CONF_SE))
 		rm7k_sc_enable();
 
+	bcops = &rm7k_sc_ops;
+
 	/*
 	 * While we're at it let's deal with the tertiary cache.
 	 */
-	if (!(config & RM7K_CONF_TC)) {
-
-		/*
-		 * We can't enable the L3 cache yet. There may be board-specific
-		 * magic necessary to turn it on, and blindly asking the CPU to
-		 * start using it would may give cache errors.
-		 *
-		 * Also, board-specific knowledge may allow us to use the
-		 * CACHE Flash_Invalidate_T instruction if the tag RAM supports
-		 * it, and may specify the size of the L3 cache so we don't have
-		 * to probe it.
-		 */
-		printk(KERN_INFO "Tertiary cache present, %s enabled\n",
-		       (config & RM7K_CONF_TE) ? "already" : "not (yet)");
-
-		if ((config & RM7K_CONF_TE))
-			rm7k_tcache_enabled = 1;
-	}
 
-	bcops = &rm7k_sc_ops;
+	rm7k_tcache_init = 0;
+	tcache_size = 0;
+
+	if (config & RM7K_CONF_TC)
+		return;
+
+	/*
+	 * No efficient way to ask the hardware for the size of the tcache,
+	 * so must probe for it.
+	 */
+	run_uncached(__probe_tcache);
+	rm7k_tc_enable();
+	rm7k_tcache_init = 1;
+	c->tcache.linesz = tc_lsize;
+	c->tcache.ways = 1;
+	pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
 }
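
The wrap-around probe in __probe_tcache() above can be hard to visualize from CACHE-op code alone. The following stand-alone user-space sketch is not kernel code; a plain array stands in for the tertiary tag RAM and the 2MB size is arbitrary. It demonstrates the same idea: fill power-of-two indices with valid tags, write a zero tag at index 0, then read back at growing strides until the zero tag reappears, which happens at the stride equal to the cache size.

#include <stdio.h>

#define PROBE_LIMIT (8 * 1024 * 1024)	/* mirrors the 8MB window used above */

static unsigned char tag[PROBE_LIMIT];	/* stand-in for the tertiary tag RAM */

/* Return the detected size of a cache that is really 'real_size' bytes. */
static unsigned long probe_size(unsigned long real_size)
{
	unsigned long pow2;

	/* Fill power-of-two indices with a valid (non-zero) tag. */
	for (pow2 = 256 * 1024; pow2 <= PROBE_LIMIT; pow2 <<= 1)
		tag[pow2 % real_size] = 1;

	/* Store a zero tag at index 0 (the "begin" line in the real code). */
	tag[0] = 0;

	/*
	 * Read back at growing strides; the first stride that aliases back
	 * onto index 0 returns the zero tag, and that stride is the size.
	 */
	for (pow2 = 512 * 1024; pow2 <= PROBE_LIMIT; pow2 <<= 1)
		if (tag[pow2 % real_size] == 0)
			break;

	return pow2;
}

int main(void)
{
	printf("detected %luK\n", probe_size(2 * 1024 * 1024) >> 10);	/* prints 2048K */
	return 0;
}
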
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 86f004dc8355..4510e61883eb 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -409,6 +409,11 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
+	case CPU_JZRISC:
+		tlbw(p);
+		uasm_i_nop(p);
+		break;
+
 	default:
 		panic("No TLB refill handler yet (CPU type: %d)",
 		      current_cpu_data.cputype);
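
For reference, the CPU_JZRISC case above makes build_tlb_write_entry() pad the slot right after the TLB write with a nop, presumably to cover a hazard on that core. A minimal, hypothetical uasm fragment doing the equivalent by hand; the scratch buffer and function name are made up, the real code emits straight into the refill handler buffer:

#include <linux/init.h>
#include <asm/uasm.h>

static u32 scratch[4] __cpuinitdata;	/* illustrative buffer only */

static void __cpuinit emit_tlb_write_with_hazard_nop(void)
{
	u32 *p = scratch;

	uasm_i_tlbwr(&p);	/* write a random TLB entry ... */
	uasm_i_nop(&p);		/* ... then pad the hazard, as the JZRISC case does */
}
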
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 611d564fdcf1..d2647a4e012b 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -62,12 +62,13 @@ enum opcode {
 	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
 	insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
 	insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
-	insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
-	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
-	insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
-	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
-	insn_dins, insn_syscall
+	insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret,
+	insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld,
+	insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori,
+	insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
+	insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
+	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_dins, insn_syscall, insn_bbit0, insn_bbit1
 };
 
 struct insn {
@@ -85,7 +86,7 @@ struct insn {
 	| (e) << RE_SH \
 	| (f) << FUNC_SH)
 
-static struct insn insn_table[] __cpuinitdata = {
+static struct insn insn_table[] __uasminitdata = {
 	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
 	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -108,6 +109,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
 	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
 	{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
 	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
 	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
 	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
@@ -141,12 +143,14 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
 	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
 	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
 	{ insn_invalid, 0, 0 }
 };
 
 #undef M
 
-static inline __cpuinit u32 build_rs(u32 arg)
+static inline __uasminit u32 build_rs(u32 arg)
 {
 	if (arg & ~RS_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -154,7 +158,7 @@ static inline __cpuinit u32 build_rs(u32 arg)
 	return (arg & RS_MASK) << RS_SH;
 }
 
-static inline __cpuinit u32 build_rt(u32 arg)
+static inline __uasminit u32 build_rt(u32 arg)
 {
 	if (arg & ~RT_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -162,7 +166,7 @@ static inline __cpuinit u32 build_rt(u32 arg)
 	return (arg & RT_MASK) << RT_SH;
 }
 
-static inline __cpuinit u32 build_rd(u32 arg)
+static inline __uasminit u32 build_rd(u32 arg)
 {
 	if (arg & ~RD_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -170,7 +174,7 @@ static inline __cpuinit u32 build_rd(u32 arg)
 	return (arg & RD_MASK) << RD_SH;
 }
 
-static inline __cpuinit u32 build_re(u32 arg)
+static inline __uasminit u32 build_re(u32 arg)
 {
 	if (arg & ~RE_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -178,7 +182,7 @@ static inline __cpuinit u32 build_re(u32 arg)
 	return (arg & RE_MASK) << RE_SH;
 }
 
-static inline __cpuinit u32 build_simm(s32 arg)
+static inline __uasminit u32 build_simm(s32 arg)
 {
 	if (arg > 0x7fff || arg < -0x8000)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -186,7 +190,7 @@ static inline __cpuinit u32 build_simm(s32 arg)
 	return arg & 0xffff;
 }
 
-static inline __cpuinit u32 build_uimm(u32 arg)
+static inline __uasminit u32 build_uimm(u32 arg)
 {
 	if (arg & ~IMM_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -194,7 +198,7 @@ static inline __cpuinit u32 build_uimm(u32 arg)
 	return arg & IMM_MASK;
 }
 
-static inline __cpuinit u32 build_bimm(s32 arg)
+static inline __uasminit u32 build_bimm(s32 arg)
 {
 	if (arg > 0x1ffff || arg < -0x20000)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -205,7 +209,7 @@ static inline __cpuinit u32 build_bimm(s32 arg)
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static inline __cpuinit u32 build_jimm(u32 arg)
+static inline __uasminit u32 build_jimm(u32 arg)
 {
 	if (arg & ~((JIMM_MASK) << 2))
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -213,7 +217,7 @@ static inline __cpuinit u32 build_jimm(u32 arg)
 	return (arg >> 2) & JIMM_MASK;
 }
 
-static inline __cpuinit u32 build_scimm(u32 arg)
+static inline __uasminit u32 build_scimm(u32 arg)
 {
 	if (arg & ~SCIMM_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -221,7 +225,7 @@ static inline __cpuinit u32 build_scimm(u32 arg)
 	return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
-static inline __cpuinit u32 build_func(u32 arg)
+static inline __uasminit u32 build_func(u32 arg)
 {
 	if (arg & ~FUNC_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -229,7 +233,7 @@ static inline __cpuinit u32 build_func(u32 arg)
 	return arg & FUNC_MASK;
 }
 
-static inline __cpuinit u32 build_set(u32 arg)
+static inline __uasminit u32 build_set(u32 arg)
 {
 	if (arg & ~SET_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -241,7 +245,7 @@ static inline __cpuinit u32 build_set(u32 arg)
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
  */
-static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
 {
 	struct insn *ip = NULL;
 	unsigned int i;
@@ -291,67 +295,78 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
 Ip_u1u2u3(op) \
 { \
 	build_insn(buf, insn##op, a, b, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u2u1u3(op) \
 Ip_u2u1u3(op) \
 { \
 	build_insn(buf, insn##op, b, a, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u3u1u2(op) \
 Ip_u3u1u2(op) \
 { \
 	build_insn(buf, insn##op, b, c, a); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u1u2s3(op) \
 Ip_u1u2s3(op) \
 { \
 	build_insn(buf, insn##op, a, b, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u2s3u1(op) \
 Ip_u2s3u1(op) \
 { \
 	build_insn(buf, insn##op, c, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u2u1s3(op) \
 Ip_u2u1s3(op) \
 { \
 	build_insn(buf, insn##op, b, a, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u2u1msbu3(op) \
 Ip_u2u1msbu3(op) \
 { \
 	build_insn(buf, insn##op, b, a, c+d-1, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u1u2(op) \
 Ip_u1u2(op) \
 { \
 	build_insn(buf, insn##op, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u1s2(op) \
 Ip_u1s2(op) \
 { \
 	build_insn(buf, insn##op, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u1(op) \
 Ip_u1(op) \
 { \
 	build_insn(buf, insn##op, a); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_0(op) \
 Ip_0(op) \
 { \
 	build_insn(buf, insn##op); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
 
 I_u2u1s3(_addiu)
 I_u3u1u2(_addu)
@@ -375,6 +390,7 @@ I_u2u1u3(_dsra)
 I_u2u1u3(_dsrl)
 I_u2u1u3(_dsrl32)
 I_u2u1u3(_drotr)
+I_u2u1u3(_drotr32)
 I_u3u1u2(_dsubu)
 I_0(_eret)
 I_u1(_j)
@@ -408,16 +424,19 @@ I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 I_u2u1msbu3(_dins);
 I_u1(_syscall);
+I_u1u2s3(_bbit0);
+I_u1u2s3(_bbit1);
 
 /* Handle labels. */
-void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
 {
 	(*lab)->addr = addr;
 	(*lab)->lab = lid;
 	(*lab)++;
 }
+UASM_EXPORT_SYMBOL(uasm_build_label);
 
-int __cpuinit uasm_in_compat_space_p(long addr)
+int __uasminit uasm_in_compat_space_p(long addr)
 {
 	/* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -426,8 +445,9 @@ int __cpuinit uasm_in_compat_space_p(long addr)
 	return 1;
 #endif
 }
+UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
 
-static int __cpuinit uasm_rel_highest(long val)
+static int __uasminit uasm_rel_highest(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -436,7 +456,7 @@ static int __cpuinit uasm_rel_highest(long val)
 #endif
 }
 
-static int __cpuinit uasm_rel_higher(long val)
+static int __uasminit uasm_rel_higher(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -445,17 +465,19 @@ static int __cpuinit uasm_rel_higher(long val)
 #endif
 }
 
-int __cpuinit uasm_rel_hi(long val)
+int __uasminit uasm_rel_hi(long val)
 {
 	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
+UASM_EXPORT_SYMBOL(uasm_rel_hi);
 
-int __cpuinit uasm_rel_lo(long val)
+int __uasminit uasm_rel_lo(long val)
 {
 	return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
+UASM_EXPORT_SYMBOL(uasm_rel_lo);
 
-void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 {
 	if (!uasm_in_compat_space_p(addr)) {
 		uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@@ -470,8 +492,9 @@ void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 	} else
 		uasm_i_lui(buf, rs, uasm_rel_hi(addr));
 }
+UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
 
-void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 {
 	UASM_i_LA_mostly(buf, rs, addr);
 	if (uasm_rel_lo(addr)) {
@@ -481,9 +504,10 @@ void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 		uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
 	}
 }
+UASM_EXPORT_SYMBOL(UASM_i_LA);
 
 /* Handle relocations. */
-void __cpuinit
+void __uasminit
 uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
 {
 	(*rel)->addr = addr;
@@ -491,8 +515,9 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
 	(*rel)->lab = lid;
 	(*rel)++;
 }
+UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
 
-static inline void __cpuinit
+static inline void __uasminit
 __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	long laddr = (long)lab->addr;
@@ -509,7 +534,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 	}
 }
 
-void __cpuinit
+void __uasminit
 uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	struct uasm_label *l;
@@ -519,24 +544,27 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 		if (rel->lab == l->lab)
 			__resolve_relocs(rel, l);
 }
+UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
 
-void __cpuinit
+void __uasminit
 uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++)
 		if (rel->addr >= first && rel->addr < end)
 			rel->addr += off;
 }
+UASM_EXPORT_SYMBOL(uasm_move_relocs);
 
-void __cpuinit
+void __uasminit
 uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
 	for (; lab->lab != UASM_LABEL_INVALID; lab++)
 		if (lab->addr >= first && lab->addr < end)
 			lab->addr += off;
 }
+UASM_EXPORT_SYMBOL(uasm_move_labels);
 
-void __cpuinit
+void __uasminit
 uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
 		  u32 *end, u32 *target)
 {
@@ -547,8 +575,9 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
 	uasm_move_relocs(rel, first, end, off);
 	uasm_move_labels(lab, first, end, off);
 }
+UASM_EXPORT_SYMBOL(uasm_copy_handler);
 
-int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
 		if (rel->addr == addr
@@ -559,61 +588,88 @@ int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 
 	return 0;
 }
+UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
 
 /* Convenience functions for labeled branches. */
-void __cpuinit
+void __uasminit
 uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bltz(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_bltz);
 
-void __cpuinit
+void __uasminit
 uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_b(p, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_b);
 
-void __cpuinit
+void __uasminit
 uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_beqz(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_beqz);
 
-void __cpuinit
+void __uasminit
 uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_beqzl(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_beqzl);
 
-void __cpuinit
+void __uasminit
 uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
 	unsigned int reg2, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bne(p, reg1, reg2, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_bne);
 
-void __cpuinit
+void __uasminit
 uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bnez(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_bnez);
 
-void __cpuinit
+void __uasminit
 uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bgezl(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_bgezl);
 
-void __cpuinit
+void __uasminit
 uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bgez(p, reg, 0);
 }
+UASM_EXPORT_SYMBOL(uasm_il_bgez);
+
+void __uasminit
+uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+	      unsigned int bit, int lid)
+{
+	uasm_r_mips_pc16(r, *p, lid);
+	uasm_i_bbit0(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+
+void __uasminit
+uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+	      unsigned int bit, int lid)
+{
+	uasm_r_mips_pc16(r, *p, lid);
+	uasm_i_bbit1(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit1);
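
The new bbit0/bbit1 entries reuse the lwc2/swc2 major opcodes, as the insn_table lines above show, and the UASM_EXPORT_SYMBOL() additions appear to make the micro-assembler callable from code built outside this file. The fragment below is a hypothetical illustration of the new helpers together with the label/relocation API; register 8, bit 5 and the label id are arbitrary choices, and the buffer sizes are made up:

#include <linux/init.h>
#include <asm/uasm.h>

enum { label_skip = 1 };	/* non-zero id, mirroring how tlbex.c numbers labels from 1 */

static struct uasm_label labels[4] __uasminitdata;
static struct uasm_reloc relocs[4] __uasminitdata;
static u32 buf[8] __uasminitdata;

static void __uasminit build_bit_test_fragment(void)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *p = buf;

	/* if bit 5 of register $8 is clear, branch over the ori */
	uasm_il_bbit0(&p, &r, 8, 5, label_skip);
	uasm_i_nop(&p);				/* branch delay slot */
	uasm_i_ori(&p, 8, 8, 0x1);

	uasm_build_label(&l, p, label_skip);	/* branch target */
	uasm_resolve_relocs(relocs, labels);	/* patch the bbit0 offset */
}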