path: root/kernel/softirq.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-11 20:20:12 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-11 20:20:12 -0500
commit    39cf275a1a18ba3c7eb9b986c5c9b35b57332798 (patch)
tree      40b119ca9d2fbaf8128d3fa25f4c64669002b0c0 /kernel/softirq.c
parent    ad5d69899e52792671c1aa6c7360464c7edfe09c (diff)
parent    e5137b50a0640009fd63a3e65c14bc6e1be8796a (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this cycle are:

   - (much) improved CONFIG_NUMA_BALANCING support from Mel Gorman, Rik
     van Riel, Peter Zijlstra et al.  Yay!

   - optimize preemption counter handling: merge the NEED_RESCHED flag
     into the preempt_count variable, by Peter Zijlstra.

   - wait.h fixes and code reorganization from Peter Zijlstra

   - cfs_bandwidth fixes from Ben Segall

   - SMP load-balancer cleanups from Peter Zijlstra

   - idle balancer improvements from Jason Low

   - other fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (129 commits)
  ftrace, sched: Add TRACE_FLAG_PREEMPT_RESCHED
  stop_machine: Fix race between stop_two_cpus() and stop_cpus()
  sched: Remove unnecessary iteration over sched domains to update nr_busy_cpus
  sched: Fix asymmetric scheduling for POWER7
  sched: Move completion code from core.c to completion.c
  sched: Move wait code from core.c to wait.c
  sched: Move wait.c into kernel/sched/
  sched/wait: Fix __wait_event_interruptible_lock_irq_timeout()
  sched: Avoid throttle_cfs_rq() racing with period_timer stopping
  sched: Guarantee new group-entities always have weight
  sched: Fix hrtimer_cancel()/rq->lock deadlock
  sched: Fix cfs_bandwidth misuse of hrtimer_expires_remaining
  sched: Fix race on toggling cfs_bandwidth_used
  sched: Remove extra put_online_cpus() inside sched_setaffinity()
  sched/rt: Fix task_tick_rt() comment
  sched/wait: Fix build breakage
  sched/wait: Introduce prepare_to_wait_event()
  sched/wait: Add ___wait_cond_timeout() to wait_event*_timeout() too
  sched: Remove get_online_cpus() usage
  sched: Fix race in migrate_swap_stop()
  ...
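A note on the "merge the NEED_RESCHED flag into the preempt_count variable" item above: the idea (as implemented for x86, at least) is to store the flag *inverted* inside the count word, so that "preemption allowed and reschedule pending" becomes the single value 0, which preempt_enable() can detect with one decrement-and-compare. What follows is a minimal userspace sketch of that encoding only; the names (pc, preempt_disable_sketch, ...) are illustrative and not the kernel's:

#include <stdio.h>

/* NEED_RESCHED is stored inverted: the bit being CLEAR means
 * "reschedule needed". That way, "preempt count is zero AND a
 * reschedule is pending" is the single value 0, testable with one
 * compare after the decrement in preempt_enable(). */
#define PREEMPT_NEED_RESCHED 0x80000000u

static unsigned int pc = PREEMPT_NEED_RESCHED; /* count 0, no resched */

static void preempt_disable_sketch(void) { pc += 1; }

static void set_need_resched_sketch(void) { pc &= ~PREEMPT_NEED_RESCHED; }

static int preempt_enable_sketch(void)
{
	/* one decrement + one test covers both conditions */
	return --pc == 0; /* nonzero => call the scheduler */
}

int main(void)
{
	preempt_disable_sketch();
	set_need_resched_sketch();
	printf("should reschedule: %d\n", preempt_enable_sketch());
	return 0;
}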
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index dacd0ab51df4..b24988353458 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -99,13 +99,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 
 	raw_local_irq_save(flags);
 	/*
-	 * The preempt tracer hooks into add_preempt_count and will break
+	 * The preempt tracer hooks into preempt_count_add and will break
 	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 	 * is set and before current->softirq_enabled is cleared.
 	 * We must manually increment preempt_count here and manually
 	 * call the trace_preempt_off later.
 	 */
-	preempt_count() += cnt;
+	__preempt_count_add(cnt);
 	/*
 	 * Were softirqs turned off above:
 	 */
@@ -119,7 +119,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-	add_preempt_count(cnt);
+	preempt_count_add(cnt);
 	barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -137,7 +137,7 @@ static void __local_bh_enable(unsigned int cnt)
 
 	if (softirq_count() == cnt)
 		trace_softirqs_on(_RET_IP_);
-	sub_preempt_count(cnt);
+	preempt_count_sub(cnt);
 }
 
 /*
@@ -168,7 +168,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
@@ -178,7 +178,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 		do_softirq();
 	}
 
-	dec_preempt_count();
+	preempt_count_dec();
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_enable();
 #endif
@@ -260,7 +260,7 @@ restart:
 			       " exited with %08x?\n", vec_nr,
 			       softirq_to_name[vec_nr], h->action,
 			       prev_count, preempt_count());
-			preempt_count() = prev_count;
+			preempt_count_set(prev_count);
 		}
 
 		rcu_bh_qs(cpu);
@@ -378,7 +378,7 @@ void irq_exit(void)
 
 	account_irq_exit_time(current);
 	trace_hardirq_exit();
-	sub_preempt_count(HARDIRQ_OFFSET);
+	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
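One asymmetry in this diff worth calling out: __local_bh_disable() takes the raw __preempt_count_add(), while every other site takes the traced preempt_count_add()/preempt_count_sub(). Per the comment in the first hunk, the traced variant can call back into lockdep at exactly the wrong moment. A minimal sketch of that layering, with an assumed counter variable (fake_preempt_count) standing in for the real per-arch storage, not the kernel's actual definitions:

#include <stdio.h>

static int fake_preempt_count;

/* Raw update: touches only the counter, no tracer hooks. */
static void __preempt_count_add(int val)
{
	fake_preempt_count += val;
}

/* Traced update: the preempt-off tracer would fire in here, and it
 * can recurse into lockdep. __local_bh_disable() must avoid that
 * while SOFTIRQ_OFFSET is already set but lockdep's softirq state is
 * not yet updated, so it uses the raw variant above and calls
 * trace_preempt_off() itself once the state is consistent. */
static void preempt_count_add(int val)
{
	__preempt_count_add(val);
	/* trace_preempt_off(...) would run here in the real kernel */
}

int main(void)
{
	preempt_count_add(1);   /* normal, traced path */
	__preempt_count_add(1); /* raw path, as in __local_bh_disable() */
	printf("count = %d\n", fake_preempt_count);
	return 0;
}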