about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--include/asm-powerpc/irqflags.h31
-rw-r--r--include/linux/hardirq.h26
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/interrupt.h11
-rw-r--r--include/linux/irqflags.h96
-rw-r--r--include/linux/sched.h15
-rw-r--r--kernel/fork.c19
-rw-r--r--kernel/sched.c4
-rw-r--r--kernel/softirq.c137
10 files changed, 313 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 525baab45d2d..027728b95429 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -429,7 +429,7 @@ void do_softirq(void)
429 local_bh_disable(); 429 local_bh_disable();
430 do_softirq_onstack(); 430 do_softirq_onstack();
431 account_system_vtime(current); 431 account_system_vtime(current);
432 __local_bh_enable(); 432 _local_bh_enable();
433 } 433 }
434 434
435 local_irq_restore(flags); 435 local_irq_restore(flags);
diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h
new file mode 100644
index 000000000000..7970cbaeaa54
--- /dev/null
+++ b/include/asm-powerpc/irqflags.h
@@ -0,0 +1,31 @@
1/*
2 * include/asm-powerpc/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() macros from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13/*
14 * Get definitions for raw_local_save_flags(x), etc.
15 */
16#include <asm-powerpc/hw_irq.h>
17
18/*
19 * Do the CPU's IRQ-state tracing from assembly code. We call a
20 * C function, so save all the C-clobbered registers:
21 */
22#ifdef CONFIG_TRACE_IRQFLAGS
23
24#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS
25
26#else
27# define TRACE_IRQS_ON
28# define TRACE_IRQS_OFF
29#endif
30
31#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca9..b1d4332b5cf0 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -86,9 +86,6 @@ extern void synchronize_irq(unsigned int irq);
86# define synchronize_irq(irq) barrier() 86# define synchronize_irq(irq) barrier()
87#endif 87#endif
88 88
89#define nmi_enter() irq_enter()
90#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
91
92struct task_struct; 89struct task_struct;
93 90
94#ifndef CONFIG_VIRT_CPU_ACCOUNTING 91#ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +94,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
97} 94}
98#endif 95#endif
99 96
97/*
98 * It is safe to do non-atomic ops on ->hardirq_context,
99 * because NMI handlers may not preempt and the ops are
100 * always balanced, so the interrupted value of ->hardirq_context
101 * will always be restored.
102 */
100#define irq_enter() \ 103#define irq_enter() \
101 do { \ 104 do { \
102 account_system_vtime(current); \ 105 account_system_vtime(current); \
103 add_preempt_count(HARDIRQ_OFFSET); \ 106 add_preempt_count(HARDIRQ_OFFSET); \
107 trace_hardirq_enter(); \
108 } while (0)
109
110/*
111 * Exit irq context without processing softirqs:
112 */
113#define __irq_exit() \
114 do { \
115 trace_hardirq_exit(); \
116 account_system_vtime(current); \
117 sub_preempt_count(HARDIRQ_OFFSET); \
104 } while (0) 118 } while (0)
105 119
120/*
121 * Exit irq context and process softirqs if needed:
122 */
106extern void irq_exit(void); 123extern void irq_exit(void);
107 124
125#define nmi_enter() irq_enter()
126#define nmi_exit() __irq_exit()
127
108#endif /* LINUX_HARDIRQ_H */ 128#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1b7bb37624bb..444a3ae0de2a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/file.h> 4#include <linux/file.h>
5#include <linux/rcupdate.h> 5#include <linux/rcupdate.h>
6#include <linux/irqflags.h>
6 7
7#define INIT_FDTABLE \ 8#define INIT_FDTABLE \
8{ \ 9{ \
@@ -124,6 +125,7 @@ extern struct group_info init_groups;
124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 125 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
125 .fs_excl = ATOMIC_INIT(0), \ 126 .fs_excl = ATOMIC_INIT(0), \
126 .pi_lock = SPIN_LOCK_UNLOCKED, \ 127 .pi_lock = SPIN_LOCK_UNLOCKED, \
128 INIT_TRACE_IRQFLAGS \
127} 129}
128 130
129 131
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 73463fbb38e4..d5afee95fd43 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
10#include <linux/irqreturn.h> 10#include <linux/irqreturn.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/irqflags.h>
13#include <asm/atomic.h> 14#include <asm/atomic.h>
14#include <asm/ptrace.h> 15#include <asm/ptrace.h>
15#include <asm/system.h> 16#include <asm/system.h>
@@ -199,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
199#define save_and_cli(x) save_and_cli(&x) 200#define save_and_cli(x) save_and_cli(&x)
200#endif /* CONFIG_SMP */ 201#endif /* CONFIG_SMP */
201 202
202/* SoftIRQ primitives. */ 203extern void local_bh_disable(void);
203#define local_bh_disable() \ 204extern void __local_bh_enable(void);
204 do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) 205extern void _local_bh_enable(void);
205#define __local_bh_enable() \
206 do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
207
208extern void local_bh_enable(void); 206extern void local_bh_enable(void);
207extern void local_bh_enable_ip(unsigned long ip);
209 208
210/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high 209/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
211 frequency threaded job scheduling. For almost all the purposes 210 frequency threaded job scheduling. For almost all the purposes
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 000000000000..412e025bc5c7
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,96 @@
1/*
2 * include/linux/irqflags.h
3 *
4 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
5 * provide callbacks for transitions between ON and OFF states.
6 *
7 * This file gets included from lowlevel asm headers too, to provide
8 * wrapped versions of the local_irq_*() APIs, based on the
9 * raw_local_irq_*() macros from the lowlevel headers.
10 */
11#ifndef _LINUX_TRACE_IRQFLAGS_H
12#define _LINUX_TRACE_IRQFLAGS_H
13
14#ifdef CONFIG_TRACE_IRQFLAGS
15 extern void trace_hardirqs_on(void);
16 extern void trace_hardirqs_off(void);
17 extern void trace_softirqs_on(unsigned long ip);
18 extern void trace_softirqs_off(unsigned long ip);
19# define trace_hardirq_context(p) ((p)->hardirq_context)
20# define trace_softirq_context(p) ((p)->softirq_context)
21# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
22# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
23# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
24# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
25# define trace_softirq_enter() do { current->softirq_context++; } while (0)
26# define trace_softirq_exit() do { current->softirq_context--; } while (0)
27# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
28#else
29# define trace_hardirqs_on() do { } while (0)
30# define trace_hardirqs_off() do { } while (0)
31# define trace_softirqs_on(ip) do { } while (0)
32# define trace_softirqs_off(ip) do { } while (0)
33# define trace_hardirq_context(p) 0
34# define trace_softirq_context(p) 0
35# define trace_hardirqs_enabled(p) 0
36# define trace_softirqs_enabled(p) 0
37# define trace_hardirq_enter() do { } while (0)
38# define trace_hardirq_exit() do { } while (0)
39# define trace_softirq_enter() do { } while (0)
40# define trace_softirq_exit() do { } while (0)
41# define INIT_TRACE_IRQFLAGS
42#endif
43
44#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
45
46#include <asm/irqflags.h>
47
48#define local_irq_enable() \
49 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
50#define local_irq_disable() \
51 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
52#define local_irq_save(flags) \
53 do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
54
55#define local_irq_restore(flags) \
56 do { \
57 if (raw_irqs_disabled_flags(flags)) { \
58 raw_local_irq_restore(flags); \
59 trace_hardirqs_off(); \
60 } else { \
61 trace_hardirqs_on(); \
62 raw_local_irq_restore(flags); \
63 } \
64 } while (0)
65#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
66/*
67 * The local_irq_*() APIs are equal to the raw_local_irq*()
68 * if !TRACE_IRQFLAGS.
69 */
70# define raw_local_irq_disable() local_irq_disable()
71# define raw_local_irq_enable() local_irq_enable()
72# define raw_local_irq_save(flags) local_irq_save(flags)
73# define raw_local_irq_restore(flags) local_irq_restore(flags)
74#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
75
76#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
77#define safe_halt() \
78 do { \
79 trace_hardirqs_on(); \
80 raw_safe_halt(); \
81 } while (0)
82
83#define local_save_flags(flags) raw_local_save_flags(flags)
84
85#define irqs_disabled() \
86({ \
87 unsigned long flags; \
88 \
89 raw_local_save_flags(flags); \
90 raw_irqs_disabled_flags(flags); \
91})
92
93#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
94#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
95
96#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bdabeee10a78..ad7a89014d29 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -871,6 +871,21 @@ struct task_struct {
871 /* mutex deadlock detection */ 871 /* mutex deadlock detection */
872 struct mutex_waiter *blocked_on; 872 struct mutex_waiter *blocked_on;
873#endif 873#endif
874#ifdef CONFIG_TRACE_IRQFLAGS
875 unsigned int irq_events;
876 int hardirqs_enabled;
877 unsigned long hardirq_enable_ip;
878 unsigned int hardirq_enable_event;
879 unsigned long hardirq_disable_ip;
880 unsigned int hardirq_disable_event;
881 int softirqs_enabled;
882 unsigned long softirq_disable_ip;
883 unsigned int softirq_disable_event;
884 unsigned long softirq_enable_ip;
885 unsigned int softirq_enable_event;
886 int hardirq_context;
887 int softirq_context;
888#endif
874 889
875/* journalling filesystem info */ 890/* journalling filesystem info */
876 void *journal_info; 891 void *journal_info;
diff --git a/kernel/fork.c b/kernel/fork.c
index 1cd46a4fb0d3..b7db7fb74f53 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -968,6 +968,10 @@ static task_t *copy_process(unsigned long clone_flags,
968 if (!p) 968 if (!p)
969 goto fork_out; 969 goto fork_out;
970 970
971#ifdef CONFIG_TRACE_IRQFLAGS
972 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
973 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
974#endif
971 retval = -EAGAIN; 975 retval = -EAGAIN;
972 if (atomic_read(&p->user->processes) >= 976 if (atomic_read(&p->user->processes) >=
973 p->signal->rlim[RLIMIT_NPROC].rlim_cur) { 977 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
@@ -1042,6 +1046,21 @@ static task_t *copy_process(unsigned long clone_flags,
1042 } 1046 }
1043 mpol_fix_fork_child_flag(p); 1047 mpol_fix_fork_child_flag(p);
1044#endif 1048#endif
1049#ifdef CONFIG_TRACE_IRQFLAGS
1050 p->irq_events = 0;
1051 p->hardirqs_enabled = 0;
1052 p->hardirq_enable_ip = 0;
1053 p->hardirq_enable_event = 0;
1054 p->hardirq_disable_ip = _THIS_IP_;
1055 p->hardirq_disable_event = 0;
1056 p->softirqs_enabled = 1;
1057 p->softirq_enable_ip = _THIS_IP_;
1058 p->softirq_enable_event = 0;
1059 p->softirq_disable_ip = 0;
1060 p->softirq_disable_event = 0;
1061 p->hardirq_context = 0;
1062 p->softirq_context = 0;
1063#endif
1045 1064
1046 rt_mutex_init_task(p); 1065 rt_mutex_init_task(p);
1047 1066
diff --git a/kernel/sched.c b/kernel/sched.c
index 48c1faa60a67..911829966534 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4462,7 +4462,9 @@ int __sched cond_resched_softirq(void)
4462 BUG_ON(!in_softirq()); 4462 BUG_ON(!in_softirq());
4463 4463
4464 if (need_resched() && __resched_legal()) { 4464 if (need_resched() && __resched_legal()) {
4465 __local_bh_enable(); 4465 raw_local_irq_disable();
4466 _local_bh_enable();
4467 raw_local_irq_enable();
4466 __cond_resched(); 4468 __cond_resched();
4467 local_bh_disable(); 4469 local_bh_disable();
4468 return 1; 4470 return 1;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8f03e3b89b55..584609b6a66e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,119 @@ static inline void wakeup_softirqd(void)
62} 62}
63 63
64/* 64/*
65 * This one is for softirq.c-internal use,
66 * where hardirqs are disabled legitimately:
67 */
68static void __local_bh_disable(unsigned long ip)
69{
70 unsigned long flags;
71
72 WARN_ON_ONCE(in_irq());
73
74 raw_local_irq_save(flags);
75 add_preempt_count(SOFTIRQ_OFFSET);
76 /*
77 * Were softirqs turned off above:
78 */
79 if (softirq_count() == SOFTIRQ_OFFSET)
80 trace_softirqs_off(ip);
81 raw_local_irq_restore(flags);
82}
83
84void local_bh_disable(void)
85{
86 __local_bh_disable((unsigned long)__builtin_return_address(0));
87}
88
89EXPORT_SYMBOL(local_bh_disable);
90
91void __local_bh_enable(void)
92{
93 WARN_ON_ONCE(in_irq());
94
95 /*
96 * softirqs should never be enabled by __local_bh_enable(),
97 * it always nests inside local_bh_enable() sections:
98 */
99 WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
100
101 sub_preempt_count(SOFTIRQ_OFFSET);
102}
103EXPORT_SYMBOL_GPL(__local_bh_enable);
104
105/*
106 * Special-case - softirqs can safely be enabled in
107 * cond_resched_softirq(), or by __do_softirq(),
108 * without processing still-pending softirqs:
109 */
110void _local_bh_enable(void)
111{
112 WARN_ON_ONCE(in_irq());
113 WARN_ON_ONCE(!irqs_disabled());
114
115 if (softirq_count() == SOFTIRQ_OFFSET)
116 trace_softirqs_on((unsigned long)__builtin_return_address(0));
117 sub_preempt_count(SOFTIRQ_OFFSET);
118}
119
120EXPORT_SYMBOL(_local_bh_enable);
121
122void local_bh_enable(void)
123{
124 unsigned long flags;
125
126 WARN_ON_ONCE(in_irq());
127 WARN_ON_ONCE(irqs_disabled());
128
129 local_irq_save(flags);
130 /*
131 * Are softirqs going to be turned on now:
132 */
133 if (softirq_count() == SOFTIRQ_OFFSET)
134 trace_softirqs_on((unsigned long)__builtin_return_address(0));
135 /*
136 * Keep preemption disabled until we are done with
137 * softirq processing:
138 */
139 sub_preempt_count(SOFTIRQ_OFFSET - 1);
140
141 if (unlikely(!in_interrupt() && local_softirq_pending()))
142 do_softirq();
143
144 dec_preempt_count();
145 local_irq_restore(flags);
146 preempt_check_resched();
147}
148EXPORT_SYMBOL(local_bh_enable);
149
150void local_bh_enable_ip(unsigned long ip)
151{
152 unsigned long flags;
153
154 WARN_ON_ONCE(in_irq());
155
156 local_irq_save(flags);
157 /*
158 * Are softirqs going to be turned on now:
159 */
160 if (softirq_count() == SOFTIRQ_OFFSET)
161 trace_softirqs_on(ip);
162 /*
163 * Keep preemption disabled until we are done with
164 * softirq processing:
165 */
166 sub_preempt_count(SOFTIRQ_OFFSET - 1);
167
168 if (unlikely(!in_interrupt() && local_softirq_pending()))
169 do_softirq();
170
171 dec_preempt_count();
172 local_irq_restore(flags);
173 preempt_check_resched();
174}
175EXPORT_SYMBOL(local_bh_enable_ip);
176
177/*
65 * We restart softirq processing MAX_SOFTIRQ_RESTART times, 178 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
66 * and we fall back to softirqd after that. 179 * and we fall back to softirqd after that.
67 * 180 *
@@ -80,8 +193,9 @@ asmlinkage void __do_softirq(void)
80 int cpu; 193 int cpu;
81 194
82 pending = local_softirq_pending(); 195 pending = local_softirq_pending();
196 __local_bh_disable((unsigned long)__builtin_return_address(0));
197 trace_softirq_enter();
83 198
84 local_bh_disable();
85 cpu = smp_processor_id(); 199 cpu = smp_processor_id();
86restart: 200restart:
87 /* Reset the pending bitmask before enabling irqs */ 201 /* Reset the pending bitmask before enabling irqs */
@@ -109,7 +223,8 @@ restart:
109 if (pending) 223 if (pending)
110 wakeup_softirqd(); 224 wakeup_softirqd();
111 225
112 __local_bh_enable(); 226 trace_softirq_exit();
227 _local_bh_enable();
113} 228}
114 229
115#ifndef __ARCH_HAS_DO_SOFTIRQ 230#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -136,23 +251,6 @@ EXPORT_SYMBOL(do_softirq);
136 251
137#endif 252#endif
138 253
139void local_bh_enable(void)
140{
141 WARN_ON(irqs_disabled());
142 /*
143 * Keep preemption disabled until we are done with
144 * softirq processing:
145 */
146 sub_preempt_count(SOFTIRQ_OFFSET - 1);
147
148 if (unlikely(!in_interrupt() && local_softirq_pending()))
149 do_softirq();
150
151 dec_preempt_count();
152 preempt_check_resched();
153}
154EXPORT_SYMBOL(local_bh_enable);
155
156#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 254#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
157# define invoke_softirq() __do_softirq() 255# define invoke_softirq() __do_softirq()
158#else 256#else
@@ -165,6 +263,7 @@ EXPORT_SYMBOL(local_bh_enable);
165void irq_exit(void) 263void irq_exit(void)
166{ 264{
167 account_system_vtime(current); 265 account_system_vtime(current);
266 trace_hardirq_exit();
168 sub_preempt_count(IRQ_EXIT_OFFSET); 267 sub_preempt_count(IRQ_EXIT_OFFSET);
169 if (!in_interrupt() && local_softirq_pending()) 268 if (!in_interrupt() && local_softirq_pending())
170 invoke_softirq(); 269 invoke_softirq();