author     Ingo Molnar <mingo@elte.hu>   2008-10-15 07:46:29 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-10-15 07:46:29 -0400
commit     b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40
tree       53ccb1c2c14751fe69cf93102e76e97021f6df07  /arch/x86/kernel/process_32.c
parent     4f962d4d65923d7b722192e729840cfb79af0a5a
parent     278429cff8809958d25415ba0ed32b59866ab1a8
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/kernel/Makefile
include/asm-x86/pda.h
Diffstat (limited to 'arch/x86/kernel/process_32.c')
 -rw-r--r--  arch/x86/kernel/process_32.c  156
 1 file changed, 60 insertions(+), 96 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index e2db9ac5c61c..0a1302fe6d45 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
 #include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
+#include <linux/dmi.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -55,14 +56,12 @@
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
 #include <asm/kdebug.h>
+#include <asm/idle.h>
+#include <asm/syscalls.h>
+#include <asm/smp.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-static int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
@@ -77,80 +76,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-		current_thread_info()->status &= ~TS_POLLING;
-		/*
-		 * TS_POLLING-cleared state must be visible before we
-		 * test NEED_RESCHED:
-		 */
-		smp_mb();
-
-		if (!need_resched())
-			safe_halt();	/* enables interrupts racelessly */
-		else
-			local_irq_enable();
-		current_thread_info()->status |= TS_POLLING;
-	} else {
-		local_irq_enable();
-		/* loop is done by the caller */
-		cpu_relax();
-	}
-}
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
-	/* This must be done before dead CPU ack */
-	cpu_exit_clear();
-	wbinvd();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	/*
-	 * With physical CPU hotplug, we should halt the cpu
-	 */
-	local_irq_disable();
-	while (1)
-		halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
 	BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
  * The idle thread. There's no useful work to be
@@ -166,26 +97,24 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick();
+		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			check_pgt_cache();
 			rmb();
-			idle = pm_idle;
 
 			if (rcu_pending(cpu))
 				rcu_check_callbacks(cpu, 0);
 
-			if (!idle)
-				idle = default_idle;
-
 			if (cpu_is_offline(cpu))
 				play_dead();
 
 			local_irq_disable();
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-			idle();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			start_critical_timings();
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -194,12 +123,13 @@ void cpu_idle(void)
 	}
 }
 
-void __show_registers(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned long sp;
 	unsigned short ss, gs;
+	const char *board;
 
 	if (user_mode_vm(regs)) {
 		sp = regs->sp;
@@ -212,11 +142,15 @@ void __show_registers(struct pt_regs *regs, int all)
 	}
 
 	printk("\n");
-	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (!board)
+		board = "";
+	printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
 		task_pid_nr(current), current->comm,
 		print_tainted(), init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version, board);
 
 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
 		(u16)regs->cs, regs->ip, regs->flags,
@@ -255,7 +189,7 @@ void __show_registers(struct pt_regs *regs, int all)
 
 void show_regs(struct pt_regs *regs)
 {
-	__show_registers(regs, 1);
+	__show_regs(regs, 1);
 	show_trace(NULL, regs, &regs->sp, regs->bp);
 }
 
@@ -316,6 +250,14 @@ void exit_thread(void)
 		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		put_cpu();
 	}
+#ifdef CONFIG_X86_DS
+	/* Free any DS contexts that have not been properly released. */
+	if (unlikely(current->thread.ds_ctx)) {
+		/* we clear debugctl to make sure DS is not used. */
+		update_debugctlmsr(0);
+		ds_free(current->thread.ds_ctx);
+	}
+#endif /* CONFIG_X86_DS */
 }
 
 void flush_thread(void)
@@ -477,6 +419,35 @@ int set_tsc_mode(unsigned int val)
 	return 0;
 }
 
+#ifdef CONFIG_X86_DS
+static int update_debugctl(struct thread_struct *prev,
+			struct thread_struct *next, unsigned long debugctl)
+{
+	unsigned long ds_prev = 0;
+	unsigned long ds_next = 0;
+
+	if (prev->ds_ctx)
+		ds_prev = (unsigned long)prev->ds_ctx->ds;
+	if (next->ds_ctx)
+		ds_next = (unsigned long)next->ds_ctx->ds;
+
+	if (ds_next != ds_prev) {
+		/* we clear debugctl to make sure DS
+		 * is not in use when we change it */
+		debugctl = 0;
+		update_debugctlmsr(0);
+		wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
+	}
+	return debugctl;
+}
+#else
+static int update_debugctl(struct thread_struct *prev,
+			struct thread_struct *next, unsigned long debugctl)
+{
+	return debugctl;
+}
+#endif /* CONFIG_X86_DS */
+
 static noinline void
 __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 struct tss_struct *tss)
@@ -487,14 +458,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	prev = &prev_p->thread;
 	next = &next_p->thread;
 
-	debugctl = prev->debugctlmsr;
-	if (next->ds_area_msr != prev->ds_area_msr) {
-		/* we clear debugctl to make sure DS
-		 * is not in use when we change it */
-		debugctl = 0;
-		update_debugctlmsr(0);
-		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
-	}
+	debugctl = update_debugctl(prev, next, prev->debugctlmsr);
 
 	if (next->debugctlmsr != debugctl)
 		update_debugctlmsr(next->debugctlmsr);
@@ -518,13 +482,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		hard_enable_TSC();
 	}
 
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
 	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 
 	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {