about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@elte.hu>  2006-07-03 03:24:42 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-07-03 18:27:03 -0400
commit    de30a2b355ea85350ca2f58f3b9bf4e5bc007986 (patch)
tree      0bef670aff65614b3c78ca13b20307355b8221d5 /kernel
parent    5bdc9b447c0076f494a56fdcd93ee8c5e78a2afd (diff)
[PATCH] lockdep: irqtrace subsystem, core
Accurate hard-IRQ-flags and softirq-flags state tracing. This allows us to attach extra functionality to IRQ flags on/off events (such as trace-on/off).

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c     19
-rw-r--r--  kernel/sched.c     4
-rw-r--r--  kernel/softirq.c  137
3 files changed, 140 insertions, 20 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 1cd46a4fb0d3..b7db7fb74f53 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -968,6 +968,10 @@ static task_t *copy_process(unsigned long clone_flags,
968 if (!p) 968 if (!p)
969 goto fork_out; 969 goto fork_out;
970 970
971#ifdef CONFIG_TRACE_IRQFLAGS
972 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
973 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
974#endif
971 retval = -EAGAIN; 975 retval = -EAGAIN;
972 if (atomic_read(&p->user->processes) >= 976 if (atomic_read(&p->user->processes) >=
973 p->signal->rlim[RLIMIT_NPROC].rlim_cur) { 977 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
@@ -1042,6 +1046,21 @@ static task_t *copy_process(unsigned long clone_flags,
1042 } 1046 }
1043 mpol_fix_fork_child_flag(p); 1047 mpol_fix_fork_child_flag(p);
1044#endif 1048#endif
1049#ifdef CONFIG_TRACE_IRQFLAGS
1050 p->irq_events = 0;
1051 p->hardirqs_enabled = 0;
1052 p->hardirq_enable_ip = 0;
1053 p->hardirq_enable_event = 0;
1054 p->hardirq_disable_ip = _THIS_IP_;
1055 p->hardirq_disable_event = 0;
1056 p->softirqs_enabled = 1;
1057 p->softirq_enable_ip = _THIS_IP_;
1058 p->softirq_enable_event = 0;
1059 p->softirq_disable_ip = 0;
1060 p->softirq_disable_event = 0;
1061 p->hardirq_context = 0;
1062 p->softirq_context = 0;
1063#endif
1045 1064
1046 rt_mutex_init_task(p); 1065 rt_mutex_init_task(p);
1047 1066
diff --git a/kernel/sched.c b/kernel/sched.c
index 48c1faa60a67..911829966534 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4462,7 +4462,9 @@ int __sched cond_resched_softirq(void)
4462 BUG_ON(!in_softirq()); 4462 BUG_ON(!in_softirq());
4463 4463
4464 if (need_resched() && __resched_legal()) { 4464 if (need_resched() && __resched_legal()) {
4465 __local_bh_enable(); 4465 raw_local_irq_disable();
4466 _local_bh_enable();
4467 raw_local_irq_enable();
4466 __cond_resched(); 4468 __cond_resched();
4467 local_bh_disable(); 4469 local_bh_disable();
4468 return 1; 4470 return 1;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8f03e3b89b55..584609b6a66e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,119 @@ static inline void wakeup_softirqd(void)
62} 62}
63 63
64/* 64/*
65 * This one is for softirq.c-internal use,
66 * where hardirqs are disabled legitimately:
67 */
68static void __local_bh_disable(unsigned long ip)
69{
70 unsigned long flags;
71
72 WARN_ON_ONCE(in_irq());
73
74 raw_local_irq_save(flags);
75 add_preempt_count(SOFTIRQ_OFFSET);
76 /*
77 * Were softirqs turned off above:
78 */
79 if (softirq_count() == SOFTIRQ_OFFSET)
80 trace_softirqs_off(ip);
81 raw_local_irq_restore(flags);
82}
83
84void local_bh_disable(void)
85{
86 __local_bh_disable((unsigned long)__builtin_return_address(0));
87}
88
89EXPORT_SYMBOL(local_bh_disable);
90
91void __local_bh_enable(void)
92{
93 WARN_ON_ONCE(in_irq());
94
95 /*
96 * softirqs should never be enabled by __local_bh_enable(),
97 * it always nests inside local_bh_enable() sections:
98 */
99 WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
100
101 sub_preempt_count(SOFTIRQ_OFFSET);
102}
103EXPORT_SYMBOL_GPL(__local_bh_enable);
104
105/*
106 * Special-case - softirqs can safely be enabled in
107 * cond_resched_softirq(), or by __do_softirq(),
108 * without processing still-pending softirqs:
109 */
110void _local_bh_enable(void)
111{
112 WARN_ON_ONCE(in_irq());
113 WARN_ON_ONCE(!irqs_disabled());
114
115 if (softirq_count() == SOFTIRQ_OFFSET)
116 trace_softirqs_on((unsigned long)__builtin_return_address(0));
117 sub_preempt_count(SOFTIRQ_OFFSET);
118}
119
120EXPORT_SYMBOL(_local_bh_enable);
121
122void local_bh_enable(void)
123{
124 unsigned long flags;
125
126 WARN_ON_ONCE(in_irq());
127 WARN_ON_ONCE(irqs_disabled());
128
129 local_irq_save(flags);
130 /*
131 * Are softirqs going to be turned on now:
132 */
133 if (softirq_count() == SOFTIRQ_OFFSET)
134 trace_softirqs_on((unsigned long)__builtin_return_address(0));
135 /*
136 * Keep preemption disabled until we are done with
137 * softirq processing:
138 */
139 sub_preempt_count(SOFTIRQ_OFFSET - 1);
140
141 if (unlikely(!in_interrupt() && local_softirq_pending()))
142 do_softirq();
143
144 dec_preempt_count();
145 local_irq_restore(flags);
146 preempt_check_resched();
147}
148EXPORT_SYMBOL(local_bh_enable);
149
150void local_bh_enable_ip(unsigned long ip)
151{
152 unsigned long flags;
153
154 WARN_ON_ONCE(in_irq());
155
156 local_irq_save(flags);
157 /*
158 * Are softirqs going to be turned on now:
159 */
160 if (softirq_count() == SOFTIRQ_OFFSET)
161 trace_softirqs_on(ip);
162 /*
163 * Keep preemption disabled until we are done with
164 * softirq processing:
165 */
166 sub_preempt_count(SOFTIRQ_OFFSET - 1);
167
168 if (unlikely(!in_interrupt() && local_softirq_pending()))
169 do_softirq();
170
171 dec_preempt_count();
172 local_irq_restore(flags);
173 preempt_check_resched();
174}
175EXPORT_SYMBOL(local_bh_enable_ip);
176
177/*
65 * We restart softirq processing MAX_SOFTIRQ_RESTART times, 178 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
66 * and we fall back to softirqd after that. 179 * and we fall back to softirqd after that.
67 * 180 *
@@ -80,8 +193,9 @@ asmlinkage void __do_softirq(void)
80 int cpu; 193 int cpu;
81 194
82 pending = local_softirq_pending(); 195 pending = local_softirq_pending();
196 __local_bh_disable((unsigned long)__builtin_return_address(0));
197 trace_softirq_enter();
83 198
84 local_bh_disable();
85 cpu = smp_processor_id(); 199 cpu = smp_processor_id();
86restart: 200restart:
87 /* Reset the pending bitmask before enabling irqs */ 201 /* Reset the pending bitmask before enabling irqs */
@@ -109,7 +223,8 @@ restart:
109 if (pending) 223 if (pending)
110 wakeup_softirqd(); 224 wakeup_softirqd();
111 225
112 __local_bh_enable(); 226 trace_softirq_exit();
227 _local_bh_enable();
113} 228}
114 229
115#ifndef __ARCH_HAS_DO_SOFTIRQ 230#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -136,23 +251,6 @@ EXPORT_SYMBOL(do_softirq);
136 251
137#endif 252#endif
138 253
139void local_bh_enable(void)
140{
141 WARN_ON(irqs_disabled());
142 /*
143 * Keep preemption disabled until we are done with
144 * softirq processing:
145 */
146 sub_preempt_count(SOFTIRQ_OFFSET - 1);
147
148 if (unlikely(!in_interrupt() && local_softirq_pending()))
149 do_softirq();
150
151 dec_preempt_count();
152 preempt_check_resched();
153}
154EXPORT_SYMBOL(local_bh_enable);
155
156#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 254#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
157# define invoke_softirq() __do_softirq() 255# define invoke_softirq() __do_softirq()
158#else 256#else
@@ -165,6 +263,7 @@ EXPORT_SYMBOL(local_bh_enable);
165void irq_exit(void) 263void irq_exit(void)
166{ 264{
167 account_system_vtime(current); 265 account_system_vtime(current);
266 trace_hardirq_exit();
168 sub_preempt_count(IRQ_EXIT_OFFSET); 267 sub_preempt_count(IRQ_EXIT_OFFSET);
169 if (!in_interrupt() && local_softirq_pending()) 268 if (!in_interrupt() && local_softirq_pending())
170 invoke_softirq(); 269 invoke_softirq();