path: root/kernel/softirq.c
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	67 +++++++++++++++++++------------------------------------------------
1 file changed, 19 insertions(+), 48 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 05f248039d77..f6b03d56c2bf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -131,23 +131,17 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-void local_bh_enable(void)
+static inline void _local_bh_enable_ip(unsigned long ip)
 {
+	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned long flags;
-
-	WARN_ON_ONCE(in_irq());
-#endif
-	WARN_ON_ONCE(irqs_disabled());
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_save(flags);
+	local_irq_disable();
 #endif
 	/*
 	 * Are softirqs going to be turned on now:
 	 */
 	if (softirq_count() == SOFTIRQ_OFFSET)
-		trace_softirqs_on((unsigned long)__builtin_return_address(0));
+		trace_softirqs_on(ip);
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
@@ -159,40 +153,20 @@ void local_bh_enable(void)
 
 	dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_restore(flags);
+	local_irq_enable();
 #endif
 	preempt_check_resched();
 }
+
+void local_bh_enable(void)
+{
+	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
 EXPORT_SYMBOL(local_bh_enable);
 
 void local_bh_enable_ip(unsigned long ip)
 {
-#ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned long flags;
-
-	WARN_ON_ONCE(in_irq());
-
-	local_irq_save(flags);
-#endif
-	/*
-	 * Are softirqs going to be turned on now:
-	 */
-	if (softirq_count() == SOFTIRQ_OFFSET)
-		trace_softirqs_on(ip);
-	/*
-	 * Keep preemption disabled until we are done with
-	 * softirq processing:
-	 */
-	sub_preempt_count(SOFTIRQ_OFFSET - 1);
-
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
-		do_softirq();
-
-	dec_preempt_count();
-#ifdef CONFIG_TRACE_IRQFLAGS
-	local_irq_restore(flags);
-#endif
-	preempt_check_resched();
+	_local_bh_enable_ip(ip);
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
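The two hunks above deduplicate local_bh_enable() and local_bh_enable_ip(): both bodies collapse into one shared helper, _local_bh_enable_ip(), which takes the caller's instruction pointer so the irq-flags tracer still reports the real call site. A minimal sketch of the call pattern that now funnels through the helper (the surrounding caller is hypothetical; the comments describe behavior visible in the diff):

	local_bh_disable();	/* softirq_count() += SOFTIRQ_OFFSET */
	/* ... touch data shared with softirq context ... */
	local_bh_enable();	/* -> _local_bh_enable_ip(__builtin_return_address(0)):
				 * runs pending softirqs via do_softirq(),
				 * then preempt_check_resched() */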
@@ -347,9 +321,8 @@ void raise_softirq(unsigned int nr)
 	local_irq_restore(flags);
 }
 
-void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
+void open_softirq(int nr, void (*action)(struct softirq_action *))
 {
-	softirq_vec[nr].data = data;
 	softirq_vec[nr].action = action;
 }
 
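open_softirq() drops its unused void *data cookie, and the softirq_vec[nr].data assignment goes with it: handlers now receive only the action pointer and must find their state elsewhere (globals or per-CPU data). A hedged sketch of registering a handler against the new two-argument signature (MY_SOFTIRQ and the handler name are illustrative, not from this patch):

	static void my_softirq_action(struct softirq_action *a)
	{
		/* state comes from global or per-CPU data,
		 * no ->data cookie is passed any more */
	}

	open_softirq(MY_SOFTIRQ, my_softirq_action);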
@@ -360,10 +333,8 @@ struct tasklet_head
 	struct tasklet_struct **tail;
 };
 
-/* Some compilers disobey section attribute on statics when not
-   initialized -- RR */
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
 void __tasklet_schedule(struct tasklet_struct *t)
 {
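The dropped "= { NULL }" initializers were redundant: static per-CPU variables live in zeroed storage, so the list head starts out NULL either way, and the old compiler-workaround comment no longer applies. The tail pointer, by contrast, must be aimed at the head before first use, which softirq_init() (next hunk) does for each CPU. Sketch of the invariant, assuming the struct layout implied by the context lines:

	struct tasklet_head {
		struct tasklet_struct *head;	/* NULL for an empty list (zeroed) */
		struct tasklet_struct **tail;	/* set to &head per CPU at boot */
	};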
@@ -503,8 +474,8 @@ void __init softirq_init(void)
 			&per_cpu(tasklet_hi_vec, cpu).head;
 	}
 
-	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
-	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
 static int ksoftirqd(void * __bind_cpu)
@@ -645,7 +616,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler(p, SCHED_FIFO, &param);
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		kthread_stop(p);
 		takeover_tasklets(hotcpu);
 		break;
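sched_setscheduler_nocheck() is the kernel-internal variant that skips the permission check applied to user-initiated policy changes; the CPU-hotplug callback above is re-prioritizing its own ksoftirqd kthread, so no credential check is wanted. A hedged sketch of the call in context (the priority value is assumed from nearby code, not shown in this hunk):

	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);	/* no capability check */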
@@ -674,12 +645,12 @@ __init int spawn_ksoftirqd(void)
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
 	int ret = 0;
 
 	preempt_disable();
-	ret = smp_call_function(func, info, retry, wait);
+	ret = smp_call_function(func, info, wait);
 	local_irq_disable();
 	func(info);
 	local_irq_enable();
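on_each_cpu() loses its retry argument in lockstep with smp_call_function(), leaving func, info, and wait. As the body shows, remote CPUs are reached through smp_call_function() while the local CPU runs the callback directly with interrupts disabled. A minimal usage sketch against the new signature (the callback is hypothetical):

	static void bump_counter(void *info)
	{
		/* runs once on every online CPU */
	}

	on_each_cpu(bump_counter, NULL, 1);	/* wait=1: return after all CPUs have run it */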