author      Linus Torvalds <torvalds@linux-foundation.org>    2014-01-20 13:42:08 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>    2014-01-20 13:42:08 -0500
commit      a0fa1dd3cdbccec9597fe53b6177a9aa6e20f2f8 (patch)
tree        b249854573815eedf377e554f0ea516f86411841 /kernel/softirq.c
parent      9326657abe1a83ed4b4f396b923ca1217fd50cba (diff)
parent      eaad45132c564ce377e6dce05e78e08e456d5315 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
- Add the initial implementation of SCHED_DEADLINE support: a real-time
scheduling policy under which tasks that execute each of their periodic
instances within their runtime quota get real-time scheduling and will
not miss their deadlines, while tasks that exceed their quota are
throttled. (Available to privileged users only for now; a usage sketch
follows this list.)
- Clean up and fix preempt_enable_no_resched() abuse all around the
tree
- Do sched_clock() performance optimizations on x86 and elsewhere
- Fix and improve auto-NUMA balancing
- Fix and clean up the idle loop
- Apply various cleanups and fixes
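
A minimal usage sketch of the new policy: a task admits itself to
SCHED_DEADLINE with the sched_setattr() syscall added by this series,
passing its runtime, deadline and period in nanoseconds. glibc does not
wrap the syscall, so the attribute struct and the wrapper below are
declared locally (mirroring the uapi layout); the 10ms/30ms/30ms budget
and the wrapper name are illustrative only, and the build assumes kernel
headers that already define __NR_sched_setattr:

/*
 * Minimal sketch: admit the calling task to SCHED_DEADLINE.
 * struct sched_attr and the syscall wrapper are declared locally because
 * glibc (as of this merge) does not expose them.  Requires privilege
 * (e.g. root / CAP_SYS_NICE), as noted in the merge message.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE  6
#endif

struct sched_attr {
        uint32_t size;                  /* sizeof(struct sched_attr) */
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;            /* used by SCHED_NORMAL/BATCH */
        uint32_t sched_priority;        /* used by SCHED_FIFO/RR */
        uint64_t sched_runtime;         /* SCHED_DEADLINE, nanoseconds */
        uint64_t sched_deadline;
        uint64_t sched_period;
};

static int sched_setattr_syscall(pid_t pid, const struct sched_attr *attr,
                                 unsigned int flags)
{
        return syscall(SYS_sched_setattr, pid, attr, flags);
}

int main(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  = 10 * 1000 * 1000,  /* 10 ms of CPU time...   */
                .sched_deadline = 30 * 1000 * 1000,  /* ...due within 30 ms... */
                .sched_period   = 30 * 1000 * 1000,  /* ...every 30 ms         */
        };

        if (sched_setattr_syscall(0, &attr, 0)) {    /* pid 0 == calling task */
                perror("sched_setattr");
                return 1;
        }

        printf("now running under SCHED_DEADLINE\n");
        /* periodic real-time work would go here */
        return 0;
}
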
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
sched: Fix __sched_setscheduler() nice test
sched: Move SCHED_RESET_ON_FORK into attr::sched_flags
sched: Fix up attr::sched_priority warning
sched: Fix up scheduler syscall LTP fails
sched: Preserve the nice level over sched_setscheduler() and sched_setparam() calls
sched/core: Fix htmldocs warnings
sched/deadline: No need to check p if dl_se is valid
sched/deadline: Remove unused variables
sched/deadline: Fix sparse static warnings
m68k: Fix build warning in mac_via.h
sched, thermal: Clean up preempt_enable_no_resched() abuse
sched, net: Fixup busy_loop_us_clock()
sched, net: Clean up preempt_enable_no_resched() abuse
sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
sched/preempt, locking: Rework local_bh_{dis,en}able()
sched/clock, x86: Avoid a runtime condition in native_sched_clock()
sched/clock: Fix up clear_sched_clock_stable()
sched/clock, x86: Use a static_key for sched_clock_stable
sched/clock: Remove local_irq_disable() from the clocks
sched/clock, x86: Rewrite cyc2ns() to avoid the need to disable IRQs
...
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--   kernel/softirq.c   39
1 files changed, 8 insertions, 31 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9a4500e4c189..8b93b3770f85 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,7 @@ static void wakeup_softirqd(void)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
 	unsigned long flags;
 
@@ -107,33 +107,21 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 	/*
 	 * Were softirqs turned off above:
 	 */
-	if (softirq_count() == cnt)
+	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_off(ip);
 	raw_local_irq_restore(flags);
 
 	if (preempt_count() == cnt)
 		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
-	preempt_count_add(cnt);
-	barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-void local_bh_disable(void)
-{
-	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
 static void __local_bh_enable(unsigned int cnt)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (softirq_count() == cnt)
+	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_on(_RET_IP_);
 	preempt_count_sub(cnt);
 }
@@ -151,7 +139,7 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 {
 	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -166,7 +154,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+	preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
@@ -182,18 +170,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 #endif
 	preempt_check_resched();
 }
-
-void local_bh_enable(void)
-{
-	_local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
-	_local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
 
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
@@ -264,7 +241,7 @@ asmlinkage void __do_softirq(void)
 	pending = local_softirq_pending();
 	account_irq_enter_time(current);
 
-	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 	in_hardirq = lockdep_softirq_start();
 
 	cpu = smp_processor_id();
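
This view is limited to kernel/softirq.c, so only the newly exported
__local_bh_disable_ip()/__local_bh_enable_ip() entry points of the
local_bh_{dis,en}able() rework are visible. The header side of the same
series turns local_bh_disable()/local_bh_enable() into inline wrappers
around these helpers; roughly, the wrappers end up looking like the sketch
below (reconstructed from the interfaces above, not part of this hunk):

/*
 * Rough sketch of the wrapper side of this rework (cf.
 * include/linux/bottom_half.h in the same series); reconstructed, not part
 * of this diff.  The old out-of-line local_bh_*() exports are replaced by
 * inlines that pass the caller's IP and the softirq disable offset down to
 * the exported __local_bh_{disable,enable}_ip() helpers.
 */
static inline void local_bh_disable(void)
{
	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

static inline void local_bh_enable_ip(unsigned long ip)
{
	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
}

static inline void local_bh_enable(void)
{
	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}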