 arch/i386/Kconfig.debug     |  4
 arch/i386/kernel/entry.S    | 36
 arch/i386/kernel/irq.c      |  6
 include/asm-i386/irqflags.h | 56
 include/asm-i386/spinlock.h |  5
 include/asm-i386/system.h   | 20
 6 files changed, 104 insertions(+), 23 deletions(-)
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b20ddd04f3d8..b31c0802e1cc 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -1,5 +1,9 @@
1menu "Kernel hacking" 1menu "Kernel hacking"
2 2
3config TRACE_IRQFLAGS_SUPPORT
4 bool
5 default y
6
3source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
4 8
5config EARLY_PRINTK 9config EARLY_PRINTK
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 787190c45fdb..d9a260f2efb4 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -42,6 +42,7 @@
 
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
+#include <asm/irqflags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
@@ -76,12 +77,21 @@ NT_MASK = 0x00004000
 VM_MASK		= 0x00020000
 
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		cli
+#define preempt_stop		cli; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel		restore_nocheck
 #endif
 
+.macro TRACE_IRQS_IRET
+#ifdef CONFIG_TRACE_IRQFLAGS
+	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
+	jz 1f
+	TRACE_IRQS_ON
+1:
+#endif
+.endm
+
 #ifdef CONFIG_VM86
 #define resume_userspace_sig	check_userspace
 #else
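The new TRACE_IRQS_IRET macro announces "interrupts on" only when the EFLAGS image that iret is about to restore actually has IF set. A self-contained C sketch of the same decision — the IF_MASK value is the real EFLAGS.IF bit, trace_hardirqs_on() is the real lockdep hook, but the helper name here is hypothetical:

#define IF_MASK 0x00000200UL		/* EFLAGS.IF, bit 9 */

extern void trace_hardirqs_on(void);	/* lockdep's irqs-on hook */

/* Hypothetical C rendering of TRACE_IRQS_IRET: called just before
 * iret, with the saved EFLAGS image that iret will restore. */
static inline void trace_irqs_iret(unsigned long saved_eflags)
{
	if (saved_eflags & IF_MASK)	/* iret will re-enable irqs */
		trace_hardirqs_on();
	/* else: irqs stay off and the tracked state already says so */
}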
@@ -257,6 +267,10 @@ ENTRY(sysenter_entry)
 	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable them straight after entry:
+	 */
 	sti
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
@@ -303,6 +317,7 @@ sysenter_past_esp:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
 	cli
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
@@ -310,6 +325,7 @@ sysenter_past_esp:
 	movl EIP(%esp), %edx
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
+	TRACE_IRQS_ON
 	sti
 	sysexit
 	CFI_ENDPROC
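Note the ordering convention these two hunks establish: entering a critical section, the hardware is disabled first and the tracer told second (cli; TRACE_IRQS_OFF); leaving it, the tracer is told first and the hardware enabled second (TRACE_IRQS_ON; sti). That way an interrupt arriving the instant sti takes effect already finds the tracked state saying "on". A sketch of the same pairing in C (helper names hypothetical; the inline asm is the privileged i386 instruction, so this is illustrative only):

extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

static inline void irq_disable_traced(void)
{
	__asm__ __volatile__("cli" : : : "memory");	/* hardware first */
	trace_hardirqs_off();				/* tracer second */
}

static inline void irq_enable_traced(void)
{
	trace_hardirqs_on();				/* tracer first */
	__asm__ __volatile__("sti" : : : "memory");	/* hardware second */
}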
@@ -339,6 +355,7 @@ syscall_exit:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
@@ -355,12 +372,15 @@ restore_all:
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 restore_nocheck:
+	TRACE_IRQS_IRET
+restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
 1:	iret
 .section .fixup,"ax"
 iret_exc:
+	TRACE_IRQS_ON
 	sti
 	pushl $0			# no error code
 	pushl $do_iret_error
@@ -386,11 +406,13 @@ ldt_ss:
 	subl $8, %esp			# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
 	cli
+	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
 	 * and a switch16 pointer on top of the current frame. */
 	call setup_x86_bogus_stack
 	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
+	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp		# switch to 16bit stack
 1:	iret
@@ -411,6 +433,7 @@ work_resched:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
@@ -462,6 +485,7 @@ syscall_trace_entry:
 syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
+	TRACE_IRQS_ON
 	sti				# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
@@ -535,9 +559,14 @@ ENTRY(irq_entries_start)
 vector=vector+1
 .endr
 
+/*
+ * The CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+ */
 	ALIGN
 common_interrupt:
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
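This is why common_interrupt gets a TRACE_IRQS_OFF with no preceding cli: interrupt gates clear EFLAGS.IF in hardware, so the software-tracked state only needs to be synchronized once registers are saved. A sketch of the same flow in C (function shape hypothetical; do_IRQ is the real handler):

extern void trace_hardirqs_off(void);
extern void do_IRQ(void *regs);		/* real handler, opaque regs here */

static void common_interrupt_c(void *regs)
{
	trace_hardirqs_off();	/* CPU already disabled irqs for us */
	do_IRQ(regs);
	/* the return path goes through ret_from_intr -> restore_all,
	 * where TRACE_IRQS_IRET re-announces "on" if iret enables */
}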
@@ -549,9 +578,10 @@ ENTRY(name) \
 	pushl $~(nr); \
 	CFI_ADJUST_CFA_OFFSET 4; \
 	SAVE_ALL; \
+	TRACE_IRQS_OFF \
 	movl %esp,%eax; \
 	call smp_/**/name; \
 	jmp ret_from_intr; \
 	CFI_ENDPROC
 
 /* The include is where all of the SMP etc. interrupts come from */
@@ -726,7 +756,7 @@ nmi_stack_correct:
 	xorl %edx,%edx			# zero error code
 	movl %esp,%eax			# pt_regs pointer
 	call do_nmi
-	jmp restore_all
+	jmp restore_nocheck_notrace
 	CFI_ENDPROC
 
 nmi_stack_fixup:
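The NMI path is the one deliberate exception: do_nmi now returns through the new restore_nocheck_notrace label, skipping TRACE_IRQS_IRET entirely, since an NMI can fire at any point — including inside the tracer itself — and the tracing hooks are not NMI-safe. In C terms the return path effectively becomes (names hypothetical, IF test as in TRACE_IRQS_IRET):

extern void trace_hardirqs_on(void);

static void exception_return(unsigned long saved_eflags, int from_nmi)
{
	/* restore_nocheck: trace first, then fall through... */
	if (!from_nmi && (saved_eflags & 0x200))	/* EFLAGS.IF */
		trace_hardirqs_on();
	/* restore_nocheck_notrace: RESTORE_REGS + iret, no tracing */
}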
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 16b491703967..6cb529f60dcc 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -166,7 +166,7 @@ void irq_ctx_init(int cpu)
 	irqctx->tinfo.task              = NULL;
 	irqctx->tinfo.exec_domain       = NULL;
 	irqctx->tinfo.cpu               = cpu;
-	irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count     = 0;
 	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
@@ -211,6 +211,10 @@ asmlinkage void do_softirq(void)
 			: "0"(isp)
 			: "memory", "cc", "edx", "ecx", "eax"
 		);
+		/*
+		 * Shouldn't happen, we returned above if in_interrupt():
+		 */
+		WARN_ON_ONCE(softirq_count());
 	}
 
 	local_irq_restore(flags);
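softirq_count() extracts the softirq nesting bits from preempt_count, so the new WARN_ON_ONCE catches any softirq context leaking past the stack switch — which is also why the hunk above stops pre-loading SOFTIRQ_OFFSET into the dedicated softirq stack's preempt_count. A standalone, userspace-compilable sketch, with bit positions assumed from the include/linux/hardirq.h layout of that era:

#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;	/* stand-in for tinfo.preempt_count */

#define softirq_count()	(preempt_count & SOFTIRQ_MASK)

int main(void)
{
	preempt_count += SOFTIRQ_OFFSET;	/* enter softirq context */
	preempt_count -= SOFTIRQ_OFFSET;	/* ...and leave it again */

	if (softirq_count())			/* the WARN_ON_ONCE condition */
		fprintf(stderr, "softirq count leaked!\n");
	return 0;
}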
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
new file mode 100644
index 000000000000..ca777e894c92
--- /dev/null
+++ b/include/asm-i386/irqflags.h
@@ -0,0 +1,56 @@
+/*
+ * include/asm-i386/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#define raw_local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
+#define raw_local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
+#define raw_local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
+#define raw_local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define raw_safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt()			__asm__ __volatile__("hlt": : :"memory")
+
+#define raw_irqs_disabled_flags(flags)	(!((flags) & (1<<9)))
+
+/* For spinlocks etc */
+#define raw_local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+
+/*
+ * Do the CPU's IRQ-state tracing from assembly code. We call a
+ * C function, so save all the C-clobbered registers:
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+# define TRACE_IRQS_ON				\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call trace_hardirqs_on;			\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+
+# define TRACE_IRQS_OFF				\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call trace_hardirqs_off;		\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+
+#endif
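With the arch primitives renamed to raw_*, the generic include/linux/irqflags.h (provided by the companion infrastructure patch, not shown here) can wrap them with tracing whenever CONFIG_TRACE_IRQFLAGS is set. A sketch of the expected wrapping — not the verbatim generic header — using only names defined above plus the lockdep hooks; note that raw_irqs_disabled_flags() tests bit 9, i.e. EFLAGS.IF:

#define local_irq_disable()						\
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_enable()						\
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_save(flags)						\
	do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)

/* restore must re-announce whichever state the flags encode: */
#define local_irq_restore(flags)					\
	do {								\
		if (raw_irqs_disabled_flags(flags)) {			\
			raw_local_irq_restore(flags);			\
			trace_hardirqs_off();				\
		} else {						\
			trace_hardirqs_on();				\
			raw_local_irq_restore(flags);			\
		}							\
	} while (0)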
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 04ba30234c48..7e29b51bcaa0 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -31,6 +31,11 @@
31 "jmp 1b\n" \ 31 "jmp 1b\n" \
32 "3:\n\t" 32 "3:\n\t"
33 33
34/*
35 * NOTE: there's an irqs-on section here, which normally would have to be
36 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
37 * __raw_spin_lock_string_flags().
38 */
34#define __raw_spin_lock_string_flags \ 39#define __raw_spin_lock_string_flags \
35 "\n1:\t" \ 40 "\n1:\t" \
36 "lock ; decb %0\n\t" \ 41 "lock ; decb %0\n\t" \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index cab0180567f9..db398d88b1d9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -456,25 +456,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-/* interrupt control.. */
-#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#include <linux/irqflags.h>
 
 /*
  * disable hlt during certain critical i/o operations
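The block removed from system.h survives almost verbatim in the new irqflags.h, just under raw_ names; generic code regains the un-prefixed APIs — including the deleted irqs_disabled() — through the linux/irqflags.h include. A sketch of how irqs_disabled() can be rebuilt from the raw primitives (consistent with the wrapper sketch above, not the verbatim generic header):

#define irqs_disabled()				\
({						\
	unsigned long _flags;			\
	raw_local_save_flags(_flags);		\
	raw_irqs_disabled_flags(_flags);	\
})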