Diffstat (limited to 'arch/mips/mm')

-rw-r--r--	arch/mips/mm/fault.c	|  13
-rw-r--r--	arch/mips/mm/tlb-r4k.c	|  85
-rw-r--r--	arch/mips/mm/tlbex.c	|  83

3 files changed, 141 insertions(+), 40 deletions(-)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 2d9624fd10ec..e3a617224868 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -157,7 +157,6 @@ no_context:
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-
 	bust_spinlocks(1);
 
 	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
@@ -188,11 +187,20 @@ do_sigbus:
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
-
+	else
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
+#if 0
+	printk("do_page_fault() #3: sending SIGBUS to %s for "
+	       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+	       tsk->comm,
+	       write ? "write access to" : "read access from",
+	       field, address,
+	       field, (unsigned long) regs->cp0_epc,
+	       field, (unsigned long) regs->regs[31]);
+#endif
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
@@ -201,7 +209,6 @@ do_sigbus:
 	force_sig_info(SIGBUS, &info, tsk);
 
 	return;
-
 vmalloc_fault:
 	{
 		/*
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index a865f2394cb0..9dca099ba16b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void);
 	"nop; nop; nop; nop; nop; nop;\n\t" \
 	".set reorder\n\t")
 
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags);\
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
 	int entry;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	write_c0_entrylo0(0);
@@ -57,7 +79,7 @@ void local_flush_tlb_all(void)
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /* All entries common to a mm share an asid. To effectively flush
@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long flags;
 		int size;
 
+		ENTER_CRITICAL(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
 		local_irq_save(flags);
@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long flags;
 	int size;
 
+	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	local_irq_save(flags);
 	if (size <= current_cpu_data.tlbsize / 2) {
 		int pid = read_c0_entryhi();
 
@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	} else {
 		local_flush_tlb_all();
 	}
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		local_irq_save(flags);
+		ENTER_CRITICAL(flags);
 		oldpid = read_c0_entryhi();
 		write_c0_entryhi(page | newpid);
 		mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 	finish:
 		write_c0_entryhi(oldpid);
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
 	unsigned long flags;
 	int oldpid, idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	oldpid = read_c0_entryhi();
 	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page)
 	}
 	write_c0_entryhi(oldpid);
 
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 #if 0
@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
 	pte_t *ptep;
 	int idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	address &= (PAGE_MASK << 1);
 	asid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	BARRIER;
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 	write_c0_entryhi(old_ctx);
 	write_c0_pagemask(old_pagemask);
 out:
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 	return ret;
 }
 
+extern void __init sanitize_tlb_entries(void);
 static void __init probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config)
 	 */
 	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
 		return;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If TLB is shared in SMTC system, total size already
+	 * has been calculated and written into cpu_data tlbsize
+	 */
+	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+		return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	reg = read_c0_config1();
 	if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config)
 	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+	get_option(&str, &ntlb);
+	return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
 void __init tlb_init(void)
 {
 	unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@ void __init tlb_init(void)
 
 	/* Did I tell you that ARC SUCKS? */
 
+	if (ntlb) {
+		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+			int wired = current_cpu_data.tlbsize - ntlb;
+			write_c0_wired(wired);
+			write_c0_index(wired-1);
+			printk ("Restricting TLB to %d entries\n", ntlb);
+		} else
+			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+	}
+
 	build_tlb_refill_handler();
 }
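For readers unfamiliar with the MIPS MT SMTC primitives used in the tlb-r4k.c hunks above, the following stand-alone sketch shows how the ENTER_CRITICAL()/EXIT_CRITICAL() pair expands. It is an illustration only, not kernel code: dvpe(), evpe() and the IRQ helpers are stubbed out here. The point is that the SMTC variant of ENTER_CRITICAL opens a block and declares mvpflags, and EXIT_CRITICAL closes that block, so the two macros must always appear as a matched lexical pair, exactly as the hunks substitute them one-for-one for local_irq_save()/local_irq_restore().

#include <stdio.h>

/* Stand-ins for the kernel primitives -- illustration only. */
#define local_irq_save(flags)    do { (flags) = 1; puts("irqs off"); } while (0)
#define local_irq_restore(flags) do { (void)(flags); puts("irqs on"); } while (0)

static unsigned int dvpe(void)      { puts("dvpe: quiesce other VPEs"); return 1; }
static void evpe(unsigned int prev) { (void)prev; puts("evpe: resume other VPEs"); }

/* Same shape as the CONFIG_MIPS_MT_SMTC variant added in tlb-r4k.c above. */
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}

int main(void)
{
	unsigned long flags;

	ENTER_CRITICAL(flags);
	puts("... TLB manipulation goes here ...");
	EXIT_CRITICAL(flags);
	return 0;
}

Built with any C compiler, the program prints the bracketing order: IRQs off, other VPEs quiesced, the critical body, then VPEs resumed and IRQs restored.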
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index c5eea6ae12ca..053dbacac56b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -7,6 +7,16 @@
  *
  * Copyright (C) 2004,2005 by Thiemo Seufer
  * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completly out of my mind.
+ *
+ * They're coming to take me a away haha
+ * they're coming to take me a away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
  */
 
 #include <stdarg.h>
@@ -68,6 +78,7 @@ enum fields
 	BIMM = 0x040,
 	JIMM = 0x080,
 	FUNC = 0x100,
+	SET = 0x200
 };
 
 #define OP_MASK 0x2f
@@ -86,6 +97,8 @@ enum fields
 #define JIMM_SH 0
 #define FUNC_MASK 0x2f
 #define FUNC_SH 0
+#define SET_MASK 0x7
+#define SET_SH 0
 
 enum opcode {
 	insn_invalid,
@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = {
 	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
 	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
-	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
-	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
+	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
 	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
 	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = {
 	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
 	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
-	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
-	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
+	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
 	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
 	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg)
 	return arg & FUNC_MASK;
 }
 
+static __init u32 build_set(u32 arg)
+{
+	if (arg & ~SET_MASK)
+		printk(KERN_WARNING "TLB synthesizer field overflow\n");
+
+	return arg & SET_MASK;
+}
+
 /*
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
 	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
 	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
 	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
+	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
@@ -358,8 +380,8 @@ I_u1s2(_bgezl);
 I_u1s2(_bltz);
 I_u1s2(_bltzl);
 I_u1u2s3(_bne);
-I_u1u2(_dmfc0);
-I_u1u2(_dmtc0);
+I_u1u2u3(_dmfc0);
+I_u1u2u3(_dmtc0);
 I_u2u1s3(_daddiu);
 I_u3u1u2(_daddu);
 I_u2u1u3(_dsll);
@@ -376,8 +398,8 @@ I_u2s3u1(_ll);
 I_u2s3u1(_lld);
 I_u1s2(_lui);
 I_u2s3u1(_lw);
-I_u1u2(_mfc0);
-I_u1u2(_mtc0);
+I_u1u2u3(_mfc0);
+I_u1u2u3(_mtc0);
 I_u2u1u3(_ori);
 I_0(_rfe);
 I_u2s3u1(_sc);
@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 #define K1 27
 
 /* Some CP0 registers */
-#define C0_INDEX 0
-#define C0_ENTRYLO0 2
-#define C0_ENTRYLO1 3
-#define C0_CONTEXT 4
-#define C0_BADVADDR 8
-#define C0_ENTRYHI 10
-#define C0_EPC 14
-#define C0_XCONTEXT 20
+#define C0_INDEX 0, 0
+#define C0_ENTRYLO0 2, 0
+#define C0_TCBIND 2, 2
+#define C0_ENTRYLO1 3, 0
+#define C0_CONTEXT 4, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_EPC 14, 0
+#define C0_XCONTEXT 20, 0
 
 #ifdef CONFIG_64BIT
 # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
@@ -951,12 +974,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
+# ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_dsrl(p, ptr, ptr, 19);
+# else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	i_dmfc0(p, ptr, C0_CONTEXT);
 	i_dsrl(p, ptr, ptr, 23);
+#endif
 	i_LA_mostly(p, tmp, pgdc);
 	i_daddu(p, ptr, ptr, tmp);
 	i_dmfc0(p, tmp, C0_BADVADDR);
@@ -1014,9 +1045,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_LA_mostly(p, tmp, pgdc);
+	i_srl(p, ptr, ptr, 19);
+#else
+	/*
+	 * smp_processor_id() << 3 is stored in CONTEXT.
+	 */
 	i_mfc0(p, ptr, C0_CONTEXT);
 	i_LA_mostly(p, tmp, pgdc);
 	i_srl(p, ptr, ptr, 23);
+#endif
 	i_addu(p, ptr, tmp, ptr);
 #else
 	i_LA_mostly(p, ptr, pgdc);