author | Ingo Molnar <mingo@elte.hu> | 2009-04-07 05:15:40 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-07 05:15:40 -0400
commit | 5e34437840d33554f69380584311743b39e8fbeb (patch) |
tree | e081135619ee146af5efb9ee883afca950df5757 /arch/x86/kernel/process.c |
parent | 77d05632baee21b1cef8730d7c06aa69601e4dca (diff) |
parent | d508afb437daee7cf07da085b635c44a4ebf9b38 (diff) |
Merge branch 'linus' into core/softlockup
Conflicts:
	kernel/sysctl.c
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r-- | arch/x86/kernel/process.c | 219
1 file changed, 211 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8c..ca989158e847 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,16 +1,19 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/idle.h>
 #include <linux/smp.h>
+#include <linux/prctl.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -19,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
+DEFINE_TRACE(power_start);
+DEFINE_TRACE(power_end);
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
@@ -56,6 +62,193 @@ void arch_task_cache_init(void)
 }
 
 /*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+	struct task_struct *me = current;
+	struct thread_struct *t = &me->thread;
+	unsigned long *bp = t->io_bitmap_ptr;
+
+	if (bp) {
+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+		t->io_bitmap_ptr = NULL;
+		clear_thread_flag(TIF_IO_BITMAP);
+		/*
+		 * Careful, clear this in the TSS too:
+		 */
+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+		t->io_bitmap_max = 0;
+		put_cpu();
+		kfree(bp);
+	}
+
+	ds_exit_thread(current);
+}
+
+void flush_thread(void)
+{
+	struct task_struct *tsk = current;
+
+#ifdef CONFIG_X86_64
+	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+			clear_tsk_thread_flag(tsk, TIF_IA32);
+		} else {
+			set_tsk_thread_flag(tsk, TIF_IA32);
+			current_thread_info()->status |= TS_COMPAT;
+		}
+	}
+#endif
+
+	clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+	tsk->thread.debugreg0 = 0;
+	tsk->thread.debugreg1 = 0;
+	tsk->thread.debugreg2 = 0;
+	tsk->thread.debugreg3 = 0;
+	tsk->thread.debugreg6 = 0;
+	tsk->thread.debugreg7 = 0;
+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+	/*
+	 * Forget coprocessor state..
+	 */
+	tsk->fpu_counter = 0;
+	clear_fpu(tsk);
+	clear_used_math();
+}
+
+static void hard_disable_TSC(void)
+{
+	write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+	preempt_disable();
+	if (!test_and_set_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_disable_TSC();
+	preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+	write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+static void enable_TSC(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_enable_TSC();
+	preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+	unsigned int val;
+
+	if (test_thread_flag(TIF_NOTSC))
+		val = PR_TSC_SIGSEGV;
+	else
+		val = PR_TSC_ENABLE;
+
+	return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+	if (val == PR_TSC_SIGSEGV)
+		disable_TSC();
+	else if (val == PR_TSC_ENABLE)
+		enable_TSC();
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss)
+{
+	struct thread_struct *prev, *next;
+
+	prev = &prev_p->thread;
+	next = &next_p->thread;
+
+	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
+	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
+		ds_switch_to(prev_p, next_p);
+	else if (next->debugctlmsr != prev->debugctlmsr)
+		update_debugctlmsr(next->debugctlmsr);
+
+	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+		set_debugreg(next->debugreg0, 0);
+		set_debugreg(next->debugreg1, 1);
+		set_debugreg(next->debugreg2, 2);
+		set_debugreg(next->debugreg3, 3);
+		/* no 4 and 5 */
+		set_debugreg(next->debugreg6, 6);
+		set_debugreg(next->debugreg7, 7);
+	}
+
+	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+		/* prev and next are different */
+		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+			hard_disable_TSC();
+		else
+			hard_enable_TSC();
+	}
+
+	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+		/*
+		 * Copy the relevant range of the IO bitmap.
+		 * Normally this is 128 bytes or less:
+		 */
+		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+		       max(prev->io_bitmap_max, next->io_bitmap_max));
+	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+		/*
+		 * Clear any possible leftover bits:
+		 */
+		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+	}
+}
+
+int sys_fork(struct pt_regs *regs)
+{
+	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+int sys_vfork(struct pt_regs *regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
+		       NULL, NULL);
+}
+
+
+/*
  * Idle related variables and functions
  */
 unsigned long boot_option_idle_override = 0;
@@ -135,7 +328,7 @@ void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
 	for (;;) {
@@ -285,12 +478,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static cpumask_t c1e_mask = CPU_MASK_NONE;
+static cpumask_var_t c1e_mask;
 static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, c1e_mask);
+	if (c1e_mask != NULL)
+		cpumask_clear_cpu(cpu, c1e_mask);
 }
 
 /*
@@ -319,8 +513,8 @@ static void c1e_idle(void)
 	if (c1e_detected) {
 		int cpu = smp_processor_id();
 
-		if (!cpu_isset(cpu, c1e_mask)) {
-			cpu_set(cpu, c1e_mask);
+		if (!cpumask_test_cpu(cpu, c1e_mask)) {
+			cpumask_set_cpu(cpu, c1e_mask);
 			/*
 			 * Force broadcast so ACPI can not interfere. Needs
 			 * to run with interrupts enabled as it uses
@@ -350,7 +544,7 @@ static void c1e_idle(void)
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
 		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
@@ -372,6 +566,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = default_idle;
 }
 
+void __init init_c1e_mask(void)
+{
+	/* If we're using c1e_idle, we need to allocate c1e_mask. */
+	if (pm_idle == c1e_idle) {
+		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+		cpumask_clear(c1e_mask);
+	}
+}
+
 static int __init idle_setup(char *str)
 {
 	if (!str)