author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-20 13:31:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-20 13:31:44 -0400
commit     2ba68940c893c8f0bfc8573c041254251bb6aeab (patch)
tree       fa83ebb01d32abd98123fa28f9f6f0b3eaeee25d /kernel/softirq.c
parent     9c2b957db1772ebf942ae7a9346b14eba6c8ca66 (diff)
parent     600e145882802d6ccbfe2c4aea243d97caeb91a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes for v3.4 from Ingo Molnar
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
printk: Make it compile with !CONFIG_PRINTK
sched/x86: Fix overflow in cyc2ns_offset
sched: Fix nohz load accounting -- again!
sched: Update yield() docs
printk/sched: Introduce special printk_sched() for those awkward moments
sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer
sched: Cleanup cpu_active madness
sched: Fix load-balance wreckage
sched: Clean up parameter passing of proc_sched_autogroup_set_nice()
sched: Ditch per cgroup task lists for load-balancing
sched: Rename load-balancing fields
sched: Move load-balancing arguments into helper struct
sched/rt: Do not submit new work when PI-blocked
sched/rt: Prevent idle task boosting
sched/wait: Add __wake_up_all_locked() API
sched/rt: Document scheduler related skip-resched-check sites
sched/rt: Use schedule_preempt_disabled()
sched/rt: Add schedule_preempt_disabled()
sched/rt: Do not throttle when PI boosting
sched/rt: Keep period timer ticking when rt throttling is active
...
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--   kernel/softirq.c   8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8afc6a8d4d7c..15352e0cbd5d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -343,7 +343,7 @@ void irq_exit(void)
                 tick_nohz_irq_exit();
 #endif
         rcu_irq_exit();
-        preempt_enable_no_resched();
+        sched_preempt_enable_no_resched();
 }
 
 /*
@@ -740,9 +740,7 @@ static int run_ksoftirqd(void * __bind_cpu)
         while (!kthread_should_stop()) {
                 preempt_disable();
                 if (!local_softirq_pending()) {
-                        preempt_enable_no_resched();
-                        schedule();
-                        preempt_disable();
+                        schedule_preempt_disabled();
                 }
 
                 __set_current_state(TASK_RUNNING);
@@ -757,7 +755,7 @@ static int run_ksoftirqd(void * __bind_cpu)
                 if (local_softirq_pending())
                         __do_softirq();
                 local_irq_enable();
-                preempt_enable_no_resched();
+                sched_preempt_enable_no_resched();
                 cond_resched();
                 preempt_disable();
                 rcu_note_context_switch((long)__bind_cpu);
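The net effect on kernel/softirq.c is small but representative of the series: the open-coded wait sequence in run_ksoftirqd() (preempt_enable_no_resched(); schedule(); preempt_disable();) collapses into a single call to the new schedule_preempt_disabled() helper, and the remaining no-resched enables become sched_preempt_enable_no_resched(). As a rough sketch, based on the three-step pattern this diff removes and the "sched/rt: Add schedule_preempt_disabled()" commit in the list above (not a quote of the merged scheduler code), the helper amounts to:

/*
 * Sketch: the caller enters with preemption disabled. Re-enable
 * preemption without an immediate resched check, block in
 * schedule(), then return with preemption disabled again.
 */
void __sched schedule_preempt_disabled(void)
{
        sched_preempt_enable_no_resched();
        schedule();
        preempt_disable();
}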