author    Thomas Gleixner <tglx@linutronix.de>  2009-03-23 09:50:03 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2009-03-23 16:20:20 -0400
commit    80c5520811d3805adcb15c570ea5e2d489fa5d0b (patch)
tree      ae797a7f4af39f80e77526533d06ac23b439f0ab /arch/x86/kernel/process.c
parent    b3e3b302cf6dc8d60b67f0e84d1fa5648889c038 (diff)
parent    8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
Merge branch 'cpus4096' into irq/threaded
Conflicts:
        arch/parisc/kernel/irq.c
        kernel/irq/handle.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/process.c')
 -rw-r--r--  arch/x86/kernel/process.c  213
 1 file changed, 206 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8c..78533a519d8f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/idle.h>
 #include <linux/smp.h>
+#include <linux/prctl.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/module.h>
@@ -11,6 +11,9 @@
 #include <linux/ftrace.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
 }
 
 /*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+        struct task_struct *me = current;
+        struct thread_struct *t = &me->thread;
+
+        if (me->thread.io_bitmap_ptr) {
+                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+                kfree(t->io_bitmap_ptr);
+                t->io_bitmap_ptr = NULL;
+                clear_thread_flag(TIF_IO_BITMAP);
+                /*
+                 * Careful, clear this in the TSS too:
+                 */
+                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+                t->io_bitmap_max = 0;
+                put_cpu();
+        }
+
+        ds_exit_thread(current);
+}
+
+void flush_thread(void)
+{
+        struct task_struct *tsk = current;
+
+#ifdef CONFIG_X86_64
+        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+                        clear_tsk_thread_flag(tsk, TIF_IA32);
+                } else {
+                        set_tsk_thread_flag(tsk, TIF_IA32);
+                        current_thread_info()->status |= TS_COMPAT;
+                }
+        }
+#endif
+
+        clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+        tsk->thread.debugreg0 = 0;
+        tsk->thread.debugreg1 = 0;
+        tsk->thread.debugreg2 = 0;
+        tsk->thread.debugreg3 = 0;
+        tsk->thread.debugreg6 = 0;
+        tsk->thread.debugreg7 = 0;
+        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+        /*
+         * Forget coprocessor state..
+         */
+        tsk->fpu_counter = 0;
+        clear_fpu(tsk);
+        clear_used_math();
+}
+
+static void hard_disable_TSC(void)
+{
+        write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+        preempt_disable();
+        if (!test_and_set_thread_flag(TIF_NOTSC))
+                /*
+                 * Must flip the CPU state synchronously with
+                 * TIF_NOTSC in the current running context.
+                 */
+                hard_disable_TSC();
+        preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+        write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+static void enable_TSC(void)
+{
+        preempt_disable();
+        if (test_and_clear_thread_flag(TIF_NOTSC))
+                /*
+                 * Must flip the CPU state synchronously with
+                 * TIF_NOTSC in the current running context.
+                 */
+                hard_enable_TSC();
+        preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+        unsigned int val;
+
+        if (test_thread_flag(TIF_NOTSC))
+                val = PR_TSC_SIGSEGV;
+        else
+                val = PR_TSC_ENABLE;
+
+        return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+        if (val == PR_TSC_SIGSEGV)
+                disable_TSC();
+        else if (val == PR_TSC_ENABLE)
+                enable_TSC();
+        else
+                return -EINVAL;
+
+        return 0;
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                      struct tss_struct *tss)
+{
+        struct thread_struct *prev, *next;
+
+        prev = &prev_p->thread;
+        next = &next_p->thread;
+
+        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
+            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
+                ds_switch_to(prev_p, next_p);
+        else if (next->debugctlmsr != prev->debugctlmsr)
+                update_debugctlmsr(next->debugctlmsr);
+
+        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+                set_debugreg(next->debugreg0, 0);
+                set_debugreg(next->debugreg1, 1);
+                set_debugreg(next->debugreg2, 2);
+                set_debugreg(next->debugreg3, 3);
+                /* no 4 and 5 */
+                set_debugreg(next->debugreg6, 6);
+                set_debugreg(next->debugreg7, 7);
+        }
+
+        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+                /* prev and next are different */
+                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+                        hard_disable_TSC();
+                else
+                        hard_enable_TSC();
+        }
+
+        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+                /*
+                 * Copy the relevant range of the IO bitmap.
+                 * Normally this is 128 bytes or less:
+                 */
+                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+                       max(prev->io_bitmap_max, next->io_bitmap_max));
+        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+                /*
+                 * Clear any possible leftover bits:
+                 */
+                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+        }
+}
+
+int sys_fork(struct pt_regs *regs)
+{
+        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+int sys_vfork(struct pt_regs *regs)
+{
+        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
+                       NULL, NULL);
+}
+
+
+/*
  * Idle related variables and functions
  */
 unsigned long boot_option_idle_override = 0;
@@ -135,7 +324,7 @@ void stop_this_cpu(void *dummy)
         /*
          * Remove this CPU:
          */
-        cpu_clear(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), false);
         disable_local_APIC();
 
         for (;;) {
@@ -285,12 +474,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
         return 1;
 }
 
-static cpumask_t c1e_mask = CPU_MASK_NONE;
+static cpumask_var_t c1e_mask;
 static int c1e_detected;
 
 void c1e_remove_cpu(int cpu)
 {
-        cpu_clear(cpu, c1e_mask);
+        if (c1e_mask != NULL)
+                cpumask_clear_cpu(cpu, c1e_mask);
 }
 
 /*
@@ -319,8 +509,8 @@ static void c1e_idle(void)
         if (c1e_detected) {
                 int cpu = smp_processor_id();
 
-                if (!cpu_isset(cpu, c1e_mask)) {
-                        cpu_set(cpu, c1e_mask);
+                if (!cpumask_test_cpu(cpu, c1e_mask)) {
+                        cpumask_set_cpu(cpu, c1e_mask);
                         /*
                          * Force broadcast so ACPI can not interfere. Needs
                          * to run with interrupts enabled as it uses
@@ -350,7 +540,7 @@ static void c1e_idle(void)
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
         if (pm_idle == poll_idle && smp_num_siblings > 1) {
                 printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                         " performance may degrade.\n");
@@ -372,6 +562,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
         pm_idle = default_idle;
 }
 
+void __init init_c1e_mask(void)
+{
+        /* If we're using c1e_idle, we need to allocate c1e_mask. */
+        if (pm_idle == c1e_idle) {
+                alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+                cpumask_clear(c1e_mask);
+        }
+}
+
 static int __init idle_setup(char *str)
 {
         if (!str)
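
For context on what the merged code does: the get_tsc_mode()/set_tsc_mode() helpers above are the arch backend for the PR_GET_TSC/PR_SET_TSC prctl pair. PR_TSC_SIGSEGV sets TIF_NOTSC and flips CR4.TSD, so a later RDTSC from that task faults. A minimal user-space sketch of the interface follows (a hypothetical demo, assuming a kernel with this support; it is not part of the commit):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/prctl.h>

    /* RDTSC issued directly; raises SIGSEGV while PR_TSC_SIGSEGV is in
     * effect, because the kernel has set CR4.TSD for this task. */
    static inline uint64_t rdtsc(void)
    {
            uint32_t lo, hi;
            __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            int mode;

            if (prctl(PR_GET_TSC, &mode) == 0)      /* get_tsc_mode() path */
                    printf("TSC mode: %s\n",
                           mode == PR_TSC_ENABLE ? "PR_TSC_ENABLE"
                                                 : "PR_TSC_SIGSEGV");

            printf("tsc=%llu\n", (unsigned long long)rdtsc());

            prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      /* disable_TSC() path */
            /* a bare rdtsc() here would now be fatal */
            prctl(PR_SET_TSC, PR_TSC_ENABLE);       /* enable_TSC() path */
            return 0;
    }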
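
Similarly, the io_bitmap handling in exit_thread() and __switch_to_xtra() services ioperm(2): the kernel keeps a per-thread bitmap and copies only the used prefix into the per-CPU TSS on each context switch, clearing leftovers when the next task has no bitmap. A small sketch of the user-visible side (requires root/CAP_SYS_RAWIO; port 0x378 is only an illustrative choice):

    #include <stdio.h>
    #include <sys/io.h>

    int main(void)
    {
            /* Granting access to one port is what allocates
             * thread.io_bitmap_ptr on the kernel side. */
            if (ioperm(0x378, 1, 1) != 0) {
                    perror("ioperm");
                    return 1;
            }

            outb(0x00, 0x378);      /* now permitted by the TSS io_bitmap */

            ioperm(0x378, 1, 0);    /* drop the permission again */
            return 0;
    }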
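
On the cpus4096 side of the merge, the point of turning static cpumask_t c1e_mask into a cpumask_var_t is footprint: with CONFIG_CPUMASK_OFFSTACK a cpumask_var_t is just a pointer, and the NR_CPUS-sized bitmap is allocated only when actually needed, exactly as init_c1e_mask() does above. A schematic kernel-style sketch of the same pattern (a generic illustration with made-up names, not code from this commit):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static cpumask_var_t tracked_cpus;

    static int __init tracked_cpus_init(void)
    {
            /* With CONFIG_CPUMASK_OFFSTACK this kmallocs the bitmap;
             * otherwise the storage is static and this returns true. */
            if (!alloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(tracked_cpus);
            return 0;
    }

    static void tracked_cpus_mark(int cpu)
    {
            cpumask_set_cpu(cpu, tracked_cpus);     /* replaces cpu_set() */
    }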
