author	Paul Burton <paul.burton@imgtec.com>	2015-01-08 07:17:37 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2015-02-12 06:30:29 -0500
commit	9791554b45a2acc28247f66a5fd5bbc212a6b8c8 (patch)
tree	b23e43745d5b5f10b004dcbe216b5a02ad6018ec /arch/mips
parent	ae58d882bfd3e537b1ed4a4c3577ca9ba853f0d8 (diff)
MIPS,prctl: add PR_[GS]ET_FP_MODE prctl options for MIPS
Userland code may be built using an ABI which permits linking to objects that
have more restrictive floating point requirements. For example, userland code
may be built to target the O32 FPXX ABI. Such code may be linked with other
FPXX code, or code built for either one of the more restrictive FP32 or FP64.
When linking with more restrictive code, the overall requirement of the
process becomes that of the more restrictive code. The kernel has no way to
know in advance which mode the process will need to be executed in, and indeed
it may need to change during execution. The dynamic loader is the only code
which will know the overall required mode, and so it needs to have a means to
instruct the kernel to switch the FP mode of the process.

This patch introduces 2 new options to the prctl syscall which provide such a
capability. The FP mode of the process is represented as a simple bitmask
combining a number of mode bits mirroring those present in the hardware.
Userland can either retrieve the current FP mode of the process:

    mode = prctl(PR_GET_FP_MODE);

or modify the current FP mode of the process:

    err = prctl(PR_SET_FP_MODE, new_mode);

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Matthew Fortune <matthew.fortune@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8899/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
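For illustration, a userland caller (typically the dynamic loader) could query
and adjust the process FP mode along the following lines. This is only a
sketch, assuming headers new enough to carry the PR_GET_FP_MODE,
PR_SET_FP_MODE, PR_FP_MODE_FR and PR_FP_MODE_FRE definitions from
<linux/prctl.h>; the error handling mirrors the commit message, not any code
in this patch:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* Retrieve the current FP mode of the process. */
            int mode = prctl(PR_GET_FP_MODE);

            if (mode < 0) {
                    perror("PR_GET_FP_MODE");
                    return 1;
            }
            printf("FR=%d FRE=%d\n",
                   !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

            /* Request 64-bit FP registers (FR=1) for the whole process. */
            if (prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR)) {
                    perror("PR_SET_FP_MODE");
                    return 1;
            }
            return 0;
    }

A dynamic loader would derive the new mode from the FP requirements of the
objects it has loaded and issue a single PR_SET_FP_MODE call before handing
control to the application.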
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/mmu.h          |  3
-rw-r--r--  arch/mips/include/asm/mmu_context.h  |  2
-rw-r--r--  arch/mips/include/asm/processor.h    | 11
-rw-r--r--  arch/mips/kernel/process.c           | 92
-rw-r--r--  arch/mips/kernel/traps.c             | 19
5 files changed, 127 insertions, 0 deletions
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index c436138945a8..1afa1f986df8 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,9 +1,12 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/atomic.h>
+
 typedef struct {
 	unsigned long asid[NR_CPUS];
 	void *vdso;
+	atomic_t fp_mode_switching;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 2f82568a3ee4..87f11072f557 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -132,6 +132,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	for_each_possible_cpu(i)
 		cpu_context(i, mm) = 0;
 
+	atomic_set(&mm->context.fp_mode_switching, 0);
+
 	return 0;
 }
 
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index f1df4cb4a286..9daa38608cd8 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -399,4 +399,15 @@ unsigned long get_wchan(struct task_struct *p);
 
 #endif
 
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+				    unsigned int value);
+
+#define GET_FP_MODE(task)		mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value)		mips_set_process_fp_mode(task, value)
+
 #endif /* _ASM_PROCESSOR_H */
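These GET_FP_MODE/SET_FP_MODE macros are the hook points consumed by the
generic prctl() handler; that change sits outside arch/mips and is therefore
filtered out of this view. As a hedged sketch only, the consumer in
kernel/sys.c is expected to look roughly like this, with architectures that do
not provide the macros falling back to -EINVAL:

    /* Sketch of the generic side (not part of the arch/mips diff shown). */
    #ifndef GET_FP_MODE
    # define GET_FP_MODE(a)		(-EINVAL)
    #endif
    #ifndef SET_FP_MODE
    # define SET_FP_MODE(a,b)	(-EINVAL)
    #endif

    	/* ... inside the prctl() option switch, where me == current ... */
    	case PR_SET_FP_MODE:
    		error = SET_FP_MODE(me, arg2);
    		break;
    	case PR_GET_FP_MODE:
    		error = GET_FP_MODE(me);
    		break;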
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb76434828e8..4677b4c67da6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,6 +25,7 @@
 #include <linux/completion.h>
 #include <linux/kallsyms.h>
 #include <linux/random.h>
+#include <linux/prctl.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -550,3 +551,94 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 {
 	smp_call_function(arch_dump_stack, NULL, 1);
 }
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+	int value = 0;
+
+	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+		value |= PR_FP_MODE_FR;
+	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+		value |= PR_FP_MODE_FRE;
+
+	return value;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+	unsigned long switch_count;
+	struct task_struct *t;
+
+	/* Check the value is valid */
+	if (value & ~known_bits)
+		return -EOPNOTSUPP;
+
+	/* Avoid inadvertently triggering emulation */
+	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+		return -EOPNOTSUPP;
+	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+		return -EOPNOTSUPP;
+
+	/* Save FP & vector context, then disable FPU & MSA */
+	if (task->signal == current->signal)
+		lose_fpu(1);
+
+	/* Prevent any threads from obtaining live FP context */
+	atomic_set(&task->mm->context.fp_mode_switching, 1);
+	smp_mb__after_atomic();
+
+	/*
+	 * If there are multiple online CPUs then wait until all threads whose
+	 * FP mode is about to change have been context switched. This approach
+	 * allows us to only worry about whether an FP mode switch is in
+	 * progress when FP is first used in a task's time slice. Pretty much
+	 * all of the mode switch overhead can thus be confined to cases where
+	 * mode switches are actually occurring. That is, to here. However for
+	 * the thread performing the mode switch it may take a while...
+	 */
+	if (num_online_cpus() > 1) {
+		spin_lock_irq(&task->sighand->siglock);
+
+		for_each_thread(task, t) {
+			if (t == current)
+				continue;
+
+			switch_count = t->nvcsw + t->nivcsw;
+
+			do {
+				spin_unlock_irq(&task->sighand->siglock);
+				cond_resched();
+				spin_lock_irq(&task->sighand->siglock);
+			} while ((t->nvcsw + t->nivcsw) == switch_count);
+		}
+
+		spin_unlock_irq(&task->sighand->siglock);
+	}
+
+	/*
+	 * There are now no threads of the process with live FP context, so it
+	 * is safe to proceed with the FP mode switch.
+	 */
+	for_each_thread(task, t) {
+		/* Update desired FP register width */
+		if (value & PR_FP_MODE_FR) {
+			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+		} else {
+			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+		}
+
+		/* Update desired FP single layout */
+		if (value & PR_FP_MODE_FRE)
+			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+		else
+			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+	}
+
+	/* Allow threads to use FP again */
+	atomic_set(&task->mm->context.fp_mode_switching, 0);
+
+	return 0;
+}
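The hunk above applies the mode switch to every thread of the process, so a
change requested from one thread is observed by all of them. A hedged userland
sketch of that process-wide behaviour, using POSIX threads (the thread
orchestration here is illustrative and not part of the patch; PR_SET_FP_MODE
may legitimately fail with EOPNOTSUPP on hardware lacking 64-bit FP registers,
exactly as mips_set_process_fp_mode() does):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    /* Worker: report the FP mode it observes after the main thread switched. */
    static void *worker(void *arg)
    {
            pthread_barrier_t *barrier = arg;
            int mode;

            /* Wait until the main thread has called PR_SET_FP_MODE. */
            pthread_barrier_wait(barrier);

            mode = prctl(PR_GET_FP_MODE);
            printf("worker sees FR=%d FRE=%d\n",
                   !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
            return NULL;
    }

    int main(void)
    {
            pthread_barrier_t barrier;
            pthread_t thread;

            pthread_barrier_init(&barrier, NULL, 2);
            pthread_create(&thread, NULL, worker, &barrier);

            /* The switch is process-wide: the worker sees the new mode too. */
            if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR))
                    perror("PR_SET_FP_MODE");
            pthread_barrier_wait(&barrier);

            pthread_join(thread, NULL);
            pthread_barrier_destroy(&barrier);
            return 0;
    }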
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ad3d2031c327..d5fbfb51b9da 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1134,10 +1134,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 	return NOTIFY_OK;
 }
 
+static int wait_on_fp_mode_switch(atomic_t *p)
+{
+	/*
+	 * The FP mode for this task is currently being switched. That may
+	 * involve modifications to the format of this task's FP context which
+	 * make it unsafe to proceed with execution for the moment. Instead,
+	 * schedule some other task.
+	 */
+	schedule();
+	return 0;
+}
+
 static int enable_restore_fp_context(int msa)
 {
 	int err, was_fpu_owner, prior_msa;
 
+	/*
+	 * If an FP mode switch is currently underway, wait for it to
+	 * complete before proceeding.
+	 */
+	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
+			 wait_on_fp_mode_switch, TASK_KILLABLE);
+
 	if (!used_math()) {
 		/* First time FP context user. */
 		preempt_disable();