Diffstat (limited to 'arch/i386/kernel')

-rw-r--r--	arch/i386/kernel/Makefile	|  1
-rw-r--r--	arch/i386/kernel/alternative.c	| 10
-rw-r--r--	arch/i386/kernel/cpuid.c	|  6
-rw-r--r--	arch/i386/kernel/entry.S	| 36
-rw-r--r--	arch/i386/kernel/irq.c		|  6
-rw-r--r--	arch/i386/kernel/nmi.c		|  2
-rw-r--r--	arch/i386/kernel/stacktrace.c	| 98
-rw-r--r--	arch/i386/kernel/traps.c	| 39

8 files changed, 161 insertions(+), 37 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index cbc1184e9473..1b452a1665c4 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -9,6 +9,7 @@ obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
 		pci-dma.o i386_ksyms.o i387.o bootflag.o \
 		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-y				+= cpu/
 obj-y				+= acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 7b421b3a053e..28ab80649764 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -303,6 +303,16 @@ void alternatives_smp_switch(int smp)
 	struct smp_alt_module *mod;
 	unsigned long flags;
 
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * A not yet fixed binutils section handling bug prevents
+	 * alternatives-replacement from working reliably, so turn
+	 * it off:
+	 */
+	printk("lockdep: not fixing up alternatives.\n");
+	return;
+#endif
+
 	if (no_replacement || smp_alt_once)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index a8d3ecdc3897..fde8bea85cee 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -167,6 +167,7 @@ static int cpuid_class_device_create(int i)
 	return err;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -186,6 +187,7 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
+#endif /* !CONFIG_HOTPLUG_CPU */
 
 static int __init cpuid_init(void)
 {
@@ -208,7 +210,7 @@ static int __init cpuid_init(void)
 		if (err != 0)
 			goto out_class;
 	}
-	register_cpu_notifier(&cpuid_class_cpu_notifier);
+	register_hotcpu_notifier(&cpuid_class_cpu_notifier);
 
 	err = 0;
 	goto out;
@@ -233,7 +235,7 @@ static void __exit cpuid_exit(void)
 		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 	class_destroy(cpuid_class);
 	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
-	unregister_cpu_notifier(&cpuid_class_cpu_notifier);
+	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
 }
 
 module_init(cpuid_init);
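
The register_hotcpu_notifier()/unregister_hotcpu_notifier() pair used above lets the #ifdef stay local to the callback: when CONFIG_HOTPLUG_CPU is off, the registration calls compile away entirely. A sketch of the idea, from memory of linux/cpu.h rather than the verbatim header:

	/* Sketch (assumption): how the hotcpu wrappers degrade to no-ops
	 * when CPU hotplug is not configured. */
	#ifdef CONFIG_HOTPLUG_CPU
	#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
	#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
	#else
	#define register_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
	#define unregister_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
	#endif
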
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 787190c45fdb..d9a260f2efb4 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -42,6 +42,7 @@
 
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
+#include <asm/irqflags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
@@ -76,12 +77,21 @@ NT_MASK = 0x00004000
 VM_MASK		= 0x00020000
 
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		cli
+#define preempt_stop		cli; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel		restore_nocheck
 #endif
 
+.macro TRACE_IRQS_IRET
+#ifdef CONFIG_TRACE_IRQFLAGS
+	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
+	jz 1f
+	TRACE_IRQS_ON
+1:
+#endif
+.endm
+
 #ifdef CONFIG_VM86
 #define resume_userspace_sig	check_userspace
 #else
@@ -257,6 +267,10 @@ ENTRY(sysenter_entry)
 	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable it straight after entry:
+	 */
 	sti
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
@@ -303,6 +317,7 @@ sysenter_past_esp:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
 	cli
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
@@ -310,6 +325,7 @@ sysenter_past_esp:
 	movl EIP(%esp), %edx
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
+	TRACE_IRQS_ON
 	sti
 	sysexit
 	CFI_ENDPROC
@@ -339,6 +355,7 @@ syscall_exit:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
@@ -355,12 +372,15 @@ restore_all:
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 restore_nocheck:
+	TRACE_IRQS_IRET
+restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section .fixup,"ax"
iret_exc:
+	TRACE_IRQS_ON
 	sti
 	pushl $0			# no error code
 	pushl $do_iret_error
@@ -386,11 +406,13 @@ ldt_ss:
 	subl $8, %esp			# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
 	cli
+	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
 	 * and a switch16 pointer on top of the current frame. */
 	call setup_x86_bogus_stack
 	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
+	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
@@ -411,6 +433,7 @@ work_resched:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
@@ -462,6 +485,7 @@ syscall_trace_entry:
syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
+	TRACE_IRQS_ON
 	sti				# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
@@ -535,9 +559,14 @@ ENTRY(irq_entries_start)
vector=vector+1
.endr
 
+/*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+ */
 	ALIGN
common_interrupt:
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
@@ -549,9 +578,10 @@ ENTRY(name) \
 	pushl $~(nr); \
 	CFI_ADJUST_CFA_OFFSET 4; \
 	SAVE_ALL; \
+	TRACE_IRQS_OFF \
 	movl %esp,%eax; \
 	call smp_/**/name; \
 	jmp ret_from_intr; \
 	CFI_ENDPROC
 
/* The include is where all of the SMP etc. interrupts come from */
@@ -726,7 +756,7 @@ nmi_stack_correct:
 	xorl %edx,%edx			# zero error code
 	movl %esp,%eax			# pt_regs pointer
 	call do_nmi
-	jmp restore_all
+	jmp restore_nocheck_notrace
 	CFI_ENDPROC
 
nmi_stack_fixup:
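
Every TRACE_IRQS_OFF/TRACE_IRQS_ON site added above keeps lockdep's view of the interrupt flag in sync with the real EFLAGS.IF at the points where entry.S uses cli/sti directly. In C the same pairing looks roughly like this — an illustrative sketch, not kernel code; the raw_local_irq_* names are the non-tracing primitives:

	#include <linux/irqflags.h>

	static void sketch_irq_off_section(void)
	{
		raw_local_irq_disable();	/* the actual cli */
		trace_hardirqs_off();		/* tell lockdep: irqs are now off */

		/* ... critical section ... */

		trace_hardirqs_on();		/* tell lockdep first ... */
		raw_local_irq_enable();		/* ... then execute the sti */
	}
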
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 16b491703967..6cb529f60dcc 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -166,7 +166,7 @@ void irq_ctx_init(int cpu)
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
@@ -211,6 +211,10 @@ asmlinkage void do_softirq(void)
 			: "0"(isp)
 			: "memory", "cc", "edx", "ecx", "eax"
 		);
+		/*
+		 * Shouldnt happen, we returned above if in_interrupt():
+		 */
+		WARN_ON_ONCE(softirq_count());
 	}
 
 	local_irq_restore(flags);
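
The WARN_ON_ONCE(softirq_count()) check expects the softirq bits of preempt_count() to be zero again after the stack switch, which is also why the softirq context's preempt_count no longer needs to be preset to SOFTIRQ_OFFSET — that accounting now happens while the softirq actually runs. Roughly how softirq_count() is derived; a sketch of the linux/hardirq.h layout with bit positions from memory:

	/* assumed preempt_count() layout: preempt bits 0-7, softirq 8-15, hardirq 16+ */
	#define SOFTIRQ_SHIFT	8
	#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
	#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)

	#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
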
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a76e93146585..2dd928a84645 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -107,7 +107,7 @@ int nmi_active;
static __init void nmi_cpu_busy(void *data)
{
 	volatile int *endflag = data;
-	local_irq_enable();
+	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
 	   even if there is a simulator or similar that catches the
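
local_irq_enable_in_hardirq() exists because lockdep treats re-enabling interrupts inside a hardirq handler as a violation; under CONFIG_LOCKDEP the call becomes a no-op, otherwise it is a plain local_irq_enable(). A sketch of the definition as I recall it from linux/interrupt.h:

	#ifdef CONFIG_LOCKDEP
	# define local_irq_enable_in_hardirq()	do { } while (0)
	#else
	# define local_irq_enable_in_hardirq()	local_irq_enable()
	#endif
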
diff --git a/arch/i386/kernel/stacktrace.c b/arch/i386/kernel/stacktrace.c
new file mode 100644
index 000000000000..e62a037ab399
--- /dev/null
+++ b/arch/i386/kernel/stacktrace.c
@@ -0,0 +1,98 @@
+/*
+ * arch/i386/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+	return	p > (void *)tinfo &&
+		p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static inline unsigned long
+save_context_stack(struct stack_trace *trace, unsigned int skip,
+		   struct thread_info *tinfo, unsigned long *stack,
+		   unsigned long ebp)
+{
+	unsigned long addr;
+
+#ifdef CONFIG_FRAME_POINTER
+	while (valid_stack_ptr(tinfo, (void *)ebp)) {
+		addr = *(unsigned long *)(ebp + 4);
+		if (!skip)
+			trace->entries[trace->nr_entries++] = addr;
+		else
+			skip--;
+		if (trace->nr_entries >= trace->max_entries)
+			break;
+		/*
+		 * break out of recursive entries (such as
+		 * end_of_stack_stop_unwind_function):
+		 */
+		if (ebp == *(unsigned long *)ebp)
+			break;
+
+		ebp = *(unsigned long *)ebp;
+	}
+#else
+	while (valid_stack_ptr(tinfo, stack)) {
+		addr = *stack++;
+		if (__kernel_text_address(addr)) {
+			if (!skip)
+				trace->entries[trace->nr_entries++] = addr;
+			else
+				skip--;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+	}
+#endif
+
+	return ebp;
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ * If all_contexts is set, all contexts (hardirq, softirq and process)
+ * are saved. If not set then only the current context is saved.
+ */
+void save_stack_trace(struct stack_trace *trace,
+		      struct task_struct *task, int all_contexts,
+		      unsigned int skip)
+{
+	unsigned long ebp;
+	unsigned long *stack = &ebp;
+
+	WARN_ON(trace->nr_entries || !trace->max_entries);
+
+	if (!task || task == current) {
+		/* Grab ebp right from our regs: */
+		asm ("movl %%ebp, %0" : "=r" (ebp));
+	} else {
+		/* ebp is the last reg pushed by switch_to(): */
+		ebp = *(unsigned long *) task->thread.esp;
+	}
+
+	while (1) {
+		struct thread_info *context = (struct thread_info *)
+				((unsigned long)stack & (~(THREAD_SIZE - 1)));
+
+		ebp = save_context_stack(trace, skip, context, stack, ebp);
+		stack = (unsigned long *)context->previous_esp;
+		if (!all_contexts || !stack ||
+		    trace->nr_entries >= trace->max_entries)
+			break;
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+		if (trace->nr_entries >= trace->max_entries)
+			break;
+	}
+}
+
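
A minimal caller of the new interface, derived from the save_stack_trace() signature above; the buffer size and printk format are illustrative:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	static void sketch_dump_stack(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.nr_entries	= 0,
			.max_entries	= ARRAY_SIZE(entries),
			.entries	= entries,
		};
		unsigned int i;

		/* current task, current context only, skip no frames */
		save_stack_trace(&trace, current, 0, 0);

		for (i = 0; i < trace.nr_entries; i++)
			printk(" [<%08lx>]\n", trace.entries[i]);
	}
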
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index e8c6086b2aa1..2bf8b55b91f8 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -115,28 +115,13 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 }
 
 /*
- * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
+ * Print one address/symbol entries per line.
  */
-static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
-					int printed)
+static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
 {
-	if (!printed)
-		printk(log_lvl);
-
-#if CONFIG_STACK_BACKTRACE_COLS == 1
 	printk(" [<%08lx>] ", addr);
-#else
-	printk(" <%08lx> ", addr);
-#endif
-	print_symbol("%s", addr);
 
-	printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
-	if (printed)
-		printk(" ");
-	else
-		printk("\n");
-
-	return printed;
+	print_symbol("%s\n", addr);
 }
 
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
@@ -144,12 +129,11 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 						char *log_lvl)
 {
 	unsigned long addr;
-	int printed = 0; /* nr of entries already printed on current line */
 
 #ifdef CONFIG_FRAME_POINTER
 	while (valid_stack_ptr(tinfo, (void *)ebp)) {
 		addr = *(unsigned long *)(ebp + 4);
-		printed = print_addr_and_symbol(addr, log_lvl, printed);
+		print_addr_and_symbol(addr, log_lvl);
 		/*
 		 * break out of recursive entries (such as
 		 * end_of_stack_stop_unwind_function):
@@ -162,28 +146,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 	while (valid_stack_ptr(tinfo, stack)) {
 		addr = *stack++;
 		if (__kernel_text_address(addr))
-			printed = print_addr_and_symbol(addr, log_lvl, printed);
+			print_addr_and_symbol(addr, log_lvl);
 	}
 #endif
-	if (printed)
-		printk("\n");
-
 	return ebp;
 }
 
-static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
+static asmlinkage int
+show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
 {
 	int n = 0;
-	int printed = 0; /* nr of entries already printed on current line */
 
 	while (unwind(info) == 0 && UNW_PC(info)) {
-		++n;
-		printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
+		n++;
+		print_addr_and_symbol(UNW_PC(info), log_lvl);
 		if (arch_unw_user_mode(info))
 			break;
 	}
-	if (printed)
-		printk("\n");
 	return n;
 }
 