Diffstat (limited to 'arch')
-rw-r--r--   arch/mips/kernel/Makefile        |   1
-rw-r--r--   arch/mips/kernel/mips-mt-fpaff.c | 176
-rw-r--r--   arch/mips/kernel/mips-mt.c       | 169
3 files changed, 177 insertions, 169 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 73983eee1431..4b40015a66d2 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_SMP) += smp.o
 
 obj-$(CONFIG_MIPS_MT) += mips-mt.o
+obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
 
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 000000000000..ede5d73d652e
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,176 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical with the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	cpumask_t effective_mask;
+	int retval;
+	struct task_struct *p;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		unlock_cpu_hotplug();
+		return -ESRCH;
+	}
+
+	/*
+	 * It is not safe to call set_cpus_allowed with the
+	 * tasklist_lock held. We will bump the task_struct's
+	 * usage count and drop tasklist_lock before invoking
+	 * set_cpus_allowed.
+	 */
+	get_task_struct(p);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE)) {
+		read_unlock(&tasklist_lock);
+		goto out_unlock;
+	}
+
+	retval = security_task_setscheduler(p, 0, NULL);
+	if (retval)
+		goto out_unlock;
+
+	/* Record new user-specified CPU set for future reference */
+	p->thread.user_cpus_allowed = new_mask;
+
+	/* Unlock the task list */
+	read_unlock(&tasklist_lock);
+
+	/* Compute new global allowed CPU set if necessary */
+	if ((p->thread.mflags & MF_FPUBOUND)
+	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
+		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed(p, effective_mask);
+	} else {
+		p->thread.mflags &= ~MF_FPUBOUND;
+		retval = set_cpus_allowed(p, new_mask);
+	}
+
+
+out_unlock:
+	put_task_struct(p);
+	unlock_cpu_hotplug();
+	return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	unsigned int real_len;
+	cpumask_t mask;
+	int retval;
+	struct task_struct *p;
+
+	real_len = sizeof(mask);
+	if (len < real_len)
+		return -EINVAL;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	unlock_cpu_hotplug();
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+	get_option(&str, &fpaff_threshold);
+	return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 333
+
+static __init int mt_fp_affinity_init(void)
+{
+	if (fpaff_threshold >= 0) {
+		mt_fpemul_threshold = fpaff_threshold;
+	} else {
+		mt_fpemul_threshold =
+			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+	}
+	printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+		mt_fpemul_threshold);
+
+	return 0;
+}
+arch_initcall(mt_fp_affinity_init);
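Note: nothing in the new file changes the user-visible interface. Affinity is still requested through the ordinary sched_setaffinity()/sched_getaffinity() system calls; the syscall plumbing that diverts those calls to the wrappers above on CONFIG_MIPS_MT_FPAFF kernels is not part of this diff. A minimal userspace sketch for illustration only (the CPU numbers are hypothetical, and glibc's cpu_set_t macros are assumed):

/*
 * Hypothetical illustration: request CPUs 0 and 1 for the current task,
 * then read back what the kernel granted.  On a CONFIG_MIPS_MT_FPAFF
 * kernel the set request ends up in mipsmt_sys_sched_setaffinity(),
 * which records the mask in thread.user_cpus_allowed and, for FPU-bound
 * tasks, intersects it with mt_fpu_cpumask before applying it.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* hypothetical CPU numbers */
	CPU_SET(1, &mask);

	if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_setaffinity");
		return 1;
	}

	if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("cpu0=%d cpu1=%d\n", CPU_ISSET(0, &mask), CPU_ISSET(1, &mask));
	return 0;
}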
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index b1b994dd41db..1a7d89231299 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -6,7 +6,6 @@
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/security.h>
@@ -23,149 +22,6 @@
 #include <asm/cacheflush.h>
 
 /*
- * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
- */
-
-cpumask_t mt_fpu_cpumask;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-unsigned long mt_fpemul_threshold = 0;
-
-/*
- * Replacement functions for the sys_sched_setaffinity() and
- * sys_sched_getaffinity() system calls, so that we can integrate
- * FPU affinity with the user's requested processor affinity.
- * This code is 98% identical with the sys_sched_setaffinity()
- * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
- */
-
-/*
- * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
- * cloned here.
- */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
-{
-	return pid ? find_task_by_pid(pid) : current;
-}
-
-
-/*
- * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	cpumask_t new_mask;
-	cpumask_t effective_mask;
-	int retval;
-	struct task_struct *p;
-
-	if (len < sizeof(new_mask))
-		return -EINVAL;
-
-	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
-		return -EFAULT;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	p = find_process_by_pid(pid);
-	if (!p) {
-		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
-		return -ESRCH;
-	}
-
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and drop tasklist_lock before invoking
-	 * set_cpus_allowed.
-	 */
-	get_task_struct(p);
-
-	retval = -EPERM;
-	if ((current->euid != p->euid) && (current->euid != p->uid) &&
-			!capable(CAP_SYS_NICE)) {
-		read_unlock(&tasklist_lock);
-		goto out_unlock;
-	}
-
-	retval = security_task_setscheduler(p, 0, NULL);
-	if (retval)
-		goto out_unlock;
-
-	/* Record new user-specified CPU set for future reference */
-	p->thread.user_cpus_allowed = new_mask;
-
-	/* Unlock the task list */
-	read_unlock(&tasklist_lock);
-
-	/* Compute new global allowed CPU set if necessary */
-	if ((p->thread.mflags & MF_FPUBOUND)
-	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
-		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed(p, effective_mask);
-	} else {
-		p->thread.mflags &= ~MF_FPUBOUND;
-		retval = set_cpus_allowed(p, new_mask);
-	}
-
-
-out_unlock:
-	put_task_struct(p);
-	unlock_cpu_hotplug();
-	return retval;
-}
-
-/*
- * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	unsigned int real_len;
-	cpumask_t mask;
-	int retval;
-	struct task_struct *p;
-
-	real_len = sizeof(mask);
-	if (len < real_len)
-		return -EINVAL;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	retval = -ESRCH;
-	p = find_process_by_pid(pid);
-	if (!p)
-		goto out_unlock;
-	retval = security_task_getscheduler(p);
-	if (retval)
-		goto out_unlock;
-
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
-
-out_unlock:
-	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
-	if (retval)
-		return retval;
-	if (copy_to_user(user_mask_ptr, &mask, real_len))
-		return -EFAULT;
-	return real_len;
-}
-
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-/*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
  */
@@ -314,17 +170,6 @@ static int __init ndflush(char *s)
 	return 1;
 }
 __setup("ndflush=", ndflush);
-#ifdef CONFIG_MIPS_MT_FPAFF
-static int fpaff_threshold = -1;
-
-static int __init fpaff_thresh(char *str)
-{
-	get_option(&str, &fpaff_threshold);
-	return 1;
-}
-
-__setup("fpaff=", fpaff_thresh);
-#endif /* CONFIG_MIPS_MT_FPAFF */
 
 static unsigned int itc_base = 0;
 
@@ -380,20 +225,6 @@ void mips_mt_set_cpuoptions(void)
 	if (mt_n_dflushes != 1)
 		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
 
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* FPU Use Factor empirically derived from experiments on 34K */
-#define FPUSEFACTOR 333
-
-	if (fpaff_threshold >= 0) {
-		mt_fpemul_threshold = fpaff_threshold;
-	} else {
-		mt_fpemul_threshold =
-			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
-	}
-	printk("FPU Affinity set after %ld emulations\n",
-		mt_fpemul_threshold);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 	if (itc_base != 0) {
 		/*
 		 * Configure ITC mapping. This code is very
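The threshold computation removed from mips_mt_set_cpuoptions() above now runs from the arch_initcall in mips-mt-fpaff.c with the arithmetic unchanged, and "fpaff=" on the kernel command line still overrides it. As a rough worked example, here is a stand-alone sketch of that formula; the values used for loops_per_jiffy and HZ are hypothetical stand-ins, not measured numbers:

#include <stdio.h>

/* FPU Use Factor from the kernel code above; the inputs below are
 * hypothetical stand-ins for the booting kernel's loops_per_jiffy and HZ. */
#define FPUSEFACTOR 333

int main(void)
{
	unsigned long loops_per_jiffy = 1000000;	/* hypothetical */
	unsigned long hz = 100;				/* hypothetical HZ */
	unsigned long threshold =
		(FPUSEFACTOR * (loops_per_jiffy / (500000 / hz))) / hz;

	/* With these numbers: (333 * (1000000 / 5000)) / 100 = 666, i.e. the
	 * default threshold would be 666 FPU emulations before a task is
	 * treated as FPU-bound and restricted to FPU-capable CPUs. */
	printf("default fpaff threshold: %lu emulations\n", threshold);
	return 0;
}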