Diffstat (limited to 'arch/mips/kernel')
 arch/mips/kernel/asm-offsets.c |   3 -
 arch/mips/kernel/irq-msc01.c   |   2 +-
 arch/mips/kernel/pm-cps.c      |   4 ++--
 arch/mips/kernel/r4k_fpu.S     | 213 -------------
 arch/mips/kernel/signal.c      |  79 +----------
 arch/mips/kernel/signal32.c    |  74 +---------
 arch/mips/kernel/smp-cps.c     |   2 +-
 7 files changed, 20 insertions(+), 357 deletions(-)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 02f075df8f2e..4bb5107511e2 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -293,7 +293,6 @@ void output_sc_defines(void)
 	OFFSET(SC_LO2, sigcontext, sc_lo2);
 	OFFSET(SC_HI3, sigcontext, sc_hi3);
 	OFFSET(SC_LO3, sigcontext, sc_lo3);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
 	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
 	OFFSET(SC_PC, sigcontext, sc_pc);
 	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
 	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
 	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
 	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
 	BLANK();
 }
 #endif
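
Note: the SC_*/SC32_* symbols above are never used from C; asm-offsets.c exists only to turn structure offsets into assembler constants consumed by r4k_fpu.S. A minimal sketch of the kbuild pattern behind OFFSET()/BLANK() (the definitions below follow include/linux/kbuild.h of this era, shown simplified):

#include <linux/stddef.h>	/* offsetof() */

/* Emit a "->SYM value" marker into the compiler's .s output; a kbuild
 * script later scrapes these markers into asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

#define BLANK() asm volatile("\n->" : : )

Once sc_msaregs leaves struct sigcontext, the OFFSET() lines deleted above would no longer compile, so they go away together with the assembly that consumed SC_MSAREGS/SC32_MSAREGS.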
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4858642d543d..a734b2c2f9ea 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
 	board_bind_eic_interrupt = &msc_bind_eic_interrupt;
 
-	for (; nirq >= 0; nirq--, imp++) {
+	for (; nirq > 0; nirq--, imp++) {
 		int n = imp->im_irq;
 
 		switch (imp->im_type) {
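
Note: this is an off-by-one fix unrelated to the MSA revert. With the old `nirq >= 0` condition the loop body ran nirq + 1 times, reading one irq-map entry past the end of the table passed in by board code. A userspace sketch of the behaviour (hypothetical walk()/irqmap names, for illustration only):

#include <stdio.h>

struct irqmap { int im_irq; };

static void walk(struct irqmap *imp, int nirq)
{
	for (; nirq > 0; nirq--, imp++)	/* old code: nirq >= 0 */
		printf("irq %d\n", imp->im_irq);
}

int main(void)
{
	struct irqmap map[2] = { { 3 }, { 5 } };

	walk(map, 2);	/* visits map[0] and map[1]; the old condition
			 * would also have dereferenced map[2] */
	return 0;
}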
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5aa4c6f8cf83..c4c2069d3a20 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
 	if (!coupled_coherence)
 		return;
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(a);
 
 	while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 
 	/* Indicate that this CPU might not be coherent */
 	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* Create a non-coherent mapping of the core ready_count */
 	core_ready_count = per_cpu(ready_count, core);
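
Note: smp_mb__before_atomic_inc() and smp_mb__after_clear_bit() (and smp_mb__after_atomic_dec() in smp-cps.c further down) were per-operation barrier helpers; around v3.16 they were consolidated into the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, so these hunks are a mechanical rename. A sketch of the usage pattern, assuming kernel context:

#include <linux/atomic.h>
#include <linux/cpumask.h>

static atomic_t ready = ATOMIC_INIT(0);

static void announce(void)
{
	/* Value-less atomic RMW ops (atomic_inc, clear_bit, ...) imply no
	 * memory barrier on their own; order prior accesses explicitly. */
	smp_mb__before_atomic();
	atomic_inc(&ready);
}

static void retire(int cpu, struct cpumask *mask)
{
	cpumask_clear_cpu(cpu, mask);
	/* Make the bit-clear visible before whatever follows it. */
	smp_mb__after_atomic();
}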
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 71814272d148..8352523568e6 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,7 +13,6 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
-#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
 	END(_restore_fp_context32)
 #endif
 
-#ifdef CONFIG_CPU_HAS_MSA
-
-	.macro	save_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	copy_u_d \tmp, \wr, 1
-	EX sd \tmp, (\off+(\wr*8))(\sc)
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	copy_u_w \tmp, \wr, 2
-	EX sw \tmp, (\off+(\wr*8)+0)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw \tmp, (\off+(\wr*8)+4)(\sc)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	copy_u_w \tmp, \wr, 2
-	EX sw \tmp, (\off+(\wr*8)+4)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw \tmp, (\off+(\wr*8)+0)(\sc)
-#endif
-	.endm
-
-/*
- * int _save_msa_context(struct sigcontext *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context)
-	save_sc_msareg	0, SC_MSAREGS, a0, t0
-	save_sc_msareg	1, SC_MSAREGS, a0, t0
-	save_sc_msareg	2, SC_MSAREGS, a0, t0
-	save_sc_msareg	3, SC_MSAREGS, a0, t0
-	save_sc_msareg	4, SC_MSAREGS, a0, t0
-	save_sc_msareg	5, SC_MSAREGS, a0, t0
-	save_sc_msareg	6, SC_MSAREGS, a0, t0
-	save_sc_msareg	7, SC_MSAREGS, a0, t0
-	save_sc_msareg	8, SC_MSAREGS, a0, t0
-	save_sc_msareg	9, SC_MSAREGS, a0, t0
-	save_sc_msareg	10, SC_MSAREGS, a0, t0
-	save_sc_msareg	11, SC_MSAREGS, a0, t0
-	save_sc_msareg	12, SC_MSAREGS, a0, t0
-	save_sc_msareg	13, SC_MSAREGS, a0, t0
-	save_sc_msareg	14, SC_MSAREGS, a0, t0
-	save_sc_msareg	15, SC_MSAREGS, a0, t0
-	save_sc_msareg	16, SC_MSAREGS, a0, t0
-	save_sc_msareg	17, SC_MSAREGS, a0, t0
-	save_sc_msareg	18, SC_MSAREGS, a0, t0
-	save_sc_msareg	19, SC_MSAREGS, a0, t0
-	save_sc_msareg	20, SC_MSAREGS, a0, t0
-	save_sc_msareg	21, SC_MSAREGS, a0, t0
-	save_sc_msareg	22, SC_MSAREGS, a0, t0
-	save_sc_msareg	23, SC_MSAREGS, a0, t0
-	save_sc_msareg	24, SC_MSAREGS, a0, t0
-	save_sc_msareg	25, SC_MSAREGS, a0, t0
-	save_sc_msareg	26, SC_MSAREGS, a0, t0
-	save_sc_msareg	27, SC_MSAREGS, a0, t0
-	save_sc_msareg	28, SC_MSAREGS, a0, t0
-	save_sc_msareg	29, SC_MSAREGS, a0, t0
-	save_sc_msareg	30, SC_MSAREGS, a0, t0
-	save_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_save_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _save_msa_context32(struct sigcontext32 *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context32)
-	save_sc_msareg	0, SC32_MSAREGS, a0, t0
-	save_sc_msareg	1, SC32_MSAREGS, a0, t0
-	save_sc_msareg	2, SC32_MSAREGS, a0, t0
-	save_sc_msareg	3, SC32_MSAREGS, a0, t0
-	save_sc_msareg	4, SC32_MSAREGS, a0, t0
-	save_sc_msareg	5, SC32_MSAREGS, a0, t0
-	save_sc_msareg	6, SC32_MSAREGS, a0, t0
-	save_sc_msareg	7, SC32_MSAREGS, a0, t0
-	save_sc_msareg	8, SC32_MSAREGS, a0, t0
-	save_sc_msareg	9, SC32_MSAREGS, a0, t0
-	save_sc_msareg	10, SC32_MSAREGS, a0, t0
-	save_sc_msareg	11, SC32_MSAREGS, a0, t0
-	save_sc_msareg	12, SC32_MSAREGS, a0, t0
-	save_sc_msareg	13, SC32_MSAREGS, a0, t0
-	save_sc_msareg	14, SC32_MSAREGS, a0, t0
-	save_sc_msareg	15, SC32_MSAREGS, a0, t0
-	save_sc_msareg	16, SC32_MSAREGS, a0, t0
-	save_sc_msareg	17, SC32_MSAREGS, a0, t0
-	save_sc_msareg	18, SC32_MSAREGS, a0, t0
-	save_sc_msareg	19, SC32_MSAREGS, a0, t0
-	save_sc_msareg	20, SC32_MSAREGS, a0, t0
-	save_sc_msareg	21, SC32_MSAREGS, a0, t0
-	save_sc_msareg	22, SC32_MSAREGS, a0, t0
-	save_sc_msareg	23, SC32_MSAREGS, a0, t0
-	save_sc_msareg	24, SC32_MSAREGS, a0, t0
-	save_sc_msareg	25, SC32_MSAREGS, a0, t0
-	save_sc_msareg	26, SC32_MSAREGS, a0, t0
-	save_sc_msareg	27, SC32_MSAREGS, a0, t0
-	save_sc_msareg	28, SC32_MSAREGS, a0, t0
-	save_sc_msareg	29, SC32_MSAREGS, a0, t0
-	save_sc_msareg	30, SC32_MSAREGS, a0, t0
-	save_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_save_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-	.macro	restore_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	EX ld \tmp, (\off+(\wr*8))(\sc)
-	insert_d \wr, 1, \tmp
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	EX lw \tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw \tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 3, \tmp
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	EX lw \tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw \tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 3, \tmp
-#endif
-	.endm
-
-/*
- * int _restore_msa_context(struct sigcontext *sc)
- */
-LEAF(_restore_msa_context)
-	restore_sc_msareg	0, SC_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_restore_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _restore_msa_context32(struct sigcontext32 *sc)
- */
-LEAF(_restore_msa_context32)
-	restore_sc_msareg	0, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_restore_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-#endif /* CONFIG_CPU_HAS_MSA */
-
 	.set	reorder
 
 	.type	fault@function
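
Note: the deleted routines only ever touched doubleword element 1 (or word elements 2 and 3) of each vector register because bits [63:0] of each 128-bit MSA register alias the scalar FPR of the same index, which _save_fp_context/_restore_fp_context already cover. In terms of the era's union fpureg (a sketch; the real definition in arch/mips/include/asm/processor.h sizes the arrays from FPU_REG_WIDTH, which is 128 with MSA):

/* Each architectural FP/vector register is kept as one 128-bit slot: */
union fpureg {
	__u32	val32[4];	/* MSA word elements */
	__u64	val64[2];	/* one half aliases the scalar FPR */
};

/* So get_fpr64(&fpu->fpr[i], 1) - as used by the copy_msa_to_sigcontext()
 * helpers deleted below - reads bits [127:64] of vector register i, the
 * only part not already saved with the scalar FP context. (Which element
 * is "upper" is endian-dependent, matching the asm above.) */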
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 33133d3df3e5..9e60d117e41e 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -31,7 +31,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
-
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc,
-				     unsigned used_math)
+static int protected_save_fp_context(struct sigcontext __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context(sc);
-			if (save_msa && !err)
-				err = _save_msa_context(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so saving fpu context directly
 	 * does not work.
 	 */
-	disable_msa();
 	lose_fpu(1);
 	err = save_fp_context(sc);	/* this might fail */
-	if (save_msa && !err)
-		err = copy_msa_to_sigcontext(sc);
 #endif
 	return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc,
-					unsigned used_math)
+static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext(sc);
-			if (!err && (used_math & USEDMATH_MSA))
-				err = copy_msa_from_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so restoring fpu context
 	 * directly does not work.
 	 */
-	enable_msa();
 	lose_fpu(0);
 	err = restore_fp_context(sc);	/* this might fail */
-	if (restore_msa && !err)
-		err = copy_msa_from_sigcontext(sc);
 #endif
 	return err;
 }
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context(sc, used_math);
+		err |= protected_save_fp_context(sc);
 	}
 	return err;
 }
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
+check_and_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context(sc, used_math);
+	err |= protected_restore_fp_context(sc);
 	return err ?: sig;
 }
 
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context(sc, used_math);
+			err = check_and_restore_fp_context(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU. Give it up. */
 		lose_fpu(0);
 	}
 
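
Note: with the MSA branches gone, what is left of protected_save_fp_context() is the plain scalar-FP retry loop. Its overall shape, for reference (a simplified sketch, not the verbatim kernel source; the fault-in-and-retry tail is outside the hunks shown above, and the real code touches more than one word of the sigcontext):

static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			/* Live hardware state: store the FPU registers
			 * straight to the user-space sigcontext. */
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			/* State already spilled to the thread struct. */
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* Touch the faulting word to page it in, then retry;
		 * give up if the sigcontext is genuinely unwritable. */
		err = __put_user(0, &sc->sc_fpregs[0]);
		if (err)
			break;
	}
	return err;
}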
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 299f956e4db3..bae2e6ee2109 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,7 +30,6 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
-				       unsigned used_math)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context32(sc);
-			if (save_msa && !err)
-				err = _save_msa_context32(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext32(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
 	return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
-					  unsigned used_math)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context32(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context32(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext32(sc);
-			if (restore_msa && !err)
-				err = copy_msa_from_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context32(sc, used_math);
+		err |= protected_save_fp_context32(sc);
 	}
 	return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
-			       unsigned used_math)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context32(sc, used_math);
+	err |= protected_restore_fp_context32(sc);
 	return err ?: sig;
 }
 
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context32(sc, used_math);
+			err = check_and_restore_fp_context32(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU. Give it up. */
 		lose_fpu(0);
 	}
 
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index df0598d9bfdd..949f2c6827a0 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
 
 	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 