author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 21:27:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 21:27:32 -0400
commit     d79ee93de909dfb252279b9a95978bbda9a814a9 (patch)
tree       bfccca60fd36259ff4bcc5e78a2c272fbd680065 /kernel/sched/rt.c
parent     2ff2b289a695807e291e1ed9f639d8a3ba5f4254 (diff)
parent     1c2927f18576d65631d8e0ddd19e1d023183222e (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The biggest change is the cleanup/simplification of the load-balancer:
instead of the current practice of architectures twiddling scheduler
internal data structures and providing the scheduler domains in
colorfully inconsistent ways, we now have generic scheduler code in
kernel/sched/core.c:sched_init_numa() that looks at the architecture's
node_distance() parameters and (while not fully trusting them) deduces a
NUMA topology from them.
This inevitably changes balancing behavior - hopefully for the better.
There are various smaller optimizations, cleanups and fixlets as well"
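The idea behind sched_init_numa() can be sketched in user space: collect the
distinct node_distance() values and treat each one as a candidate NUMA
topology level. A minimal illustration, assuming a hypothetical 4-node
distance matrix (this is a sketch of the concept, not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

/* Hypothetical node_distance() table for a 4-node machine. */
static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	int levels[NR_NODES * NR_NODES];
	int nr_levels = 0;

	/*
	 * Collect the distinct distances; each one becomes a candidate
	 * topology level (the kernel performs a similar uniq pass over
	 * node_distance() in sched_init_numa()).
	 */
	for (int i = 0; i < NR_NODES; i++) {
		for (int j = 0; j < NR_NODES; j++) {
			int d = node_distance[i][j];
			bool seen = false;

			for (int k = 0; k < nr_levels; k++)
				if (levels[k] == d)
					seen = true;
			if (!seen)
				levels[nr_levels++] = d;
		}
	}

	printf("%d distance levels:", nr_levels);
	for (int k = 0; k < nr_levels; k++)
		printf(" %d", levels[k]);
	printf("\n");
	return 0;
}

For the matrix above this prints "3 distance levels: 10 20 30": local, one
hop, two hops.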
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Taint kernel with TAINT_WARN after sleep-in-atomic bug
sched: Remove stale power aware scheduling remnants and dysfunctional knobs
sched/debug: Fix printing large integers on 32-bit platforms
sched/fair: Improve the ->group_imb logic
sched/nohz: Fix rq->cpu_load[] calculations
sched/numa: Don't scale the imbalance
sched/fair: Revert sched-domain iteration breakage
sched/x86: Rewrite set_cpu_sibling_map()
sched/numa: Fix the new NUMA topology bits
sched/numa: Rewrite the CONFIG_NUMA sched domain support
sched/fair: Propagate 'struct lb_env' usage into find_busiest_group
sched/fair: Add some serialization to the sched_domain load-balance walk
sched/fair: Let minimally loaded cpu balance the group
sched: Change rq->nr_running to unsigned int
x86/numa: Check for nonsensical topologies on real hw as well
x86/numa: Hard partition cpu topology masks on node boundaries
x86/numa: Allow specifying node_distance() for numa=fake
x86/sched: Make mwait_usable() heed to "idle=" kernel parameters properly
sched: Update documentation and comments
sched_rt: Avoid unnecessary dequeue and enqueue of pushable tasks in set_cpus_allowed_rt()
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 56 ++++++++++++++++++++++++++------------------------------
1 file changed, 26 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 44af55e6d5d0..c5565c3c515f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1803,44 +1803,40 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	int weight = cpumask_weight(new_mask);
+	struct rq *rq;
+	int weight;
 
 	BUG_ON(!rt_task(p));
 
-	/*
-	 * Update the migration status of the RQ if we have an RT task
-	 * which is running AND changing its weight value.
-	 */
-	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
-		struct rq *rq = task_rq(p);
-
-		if (!task_current(rq, p)) {
-			/*
-			 * Make sure we dequeue this task from the pushable list
-			 * before going further. It will either remain off of
-			 * the list because we are no longer pushable, or it
-			 * will be requeued.
-			 */
-			if (p->rt.nr_cpus_allowed > 1)
-				dequeue_pushable_task(rq, p);
+	if (!p->on_rq)
+		return;
 
-			/*
-			 * Requeue if our weight is changing and still > 1
-			 */
-			if (weight > 1)
-				enqueue_pushable_task(rq, p);
+	weight = cpumask_weight(new_mask);
 
-		}
+	/*
+	 * Only update if the process changes its state from whether it
+	 * can migrate or not.
+	 */
+	if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+		return;
 
-		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
-			rq->rt.rt_nr_migratory++;
-		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
-			BUG_ON(!rq->rt.rt_nr_migratory);
-			rq->rt.rt_nr_migratory--;
-		}
+	rq = task_rq(p);
 
-		update_rt_migration(&rq->rt);
+	/*
+	 * The process used to be able to migrate OR it can now migrate
+	 */
+	if (weight <= 1) {
+		if (!task_current(rq, p))
+			dequeue_pushable_task(rq, p);
+		BUG_ON(!rq->rt.rt_nr_migratory);
+		rq->rt.rt_nr_migratory--;
+	} else {
+		if (!task_current(rq, p))
+			enqueue_pushable_task(rq, p);
+		rq->rt.rt_nr_migratory++;
 	}
+
+	update_rt_migration(&rq->rt);
 }
 
 /* Assumes rq->lock is held */
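The heart of the patch is the new early-return guard: an RT task is counted
as migratory exactly when it is allowed on more than one CPU, so the
pushable-list and rt_nr_migratory bookkeeping only needs to change when that
boolean flips. A stand-alone truth-table check of the guard (illustrative
names, not kernel code):

#include <stdio.h>

/* Nonzero when migratability is unchanged and the update can be skipped. */
static int can_skip_update(int old_nr_cpus_allowed, int new_weight)
{
	return (old_nr_cpus_allowed > 1) == (new_weight > 1);
}

int main(void)
{
	/* { old allowed-CPU count, new mask weight } pairs */
	const int cases[][2] = { {1, 1}, {1, 4}, {4, 1}, {4, 2} };

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("old=%d new=%d -> %s\n",
		       cases[i][0], cases[i][1],
		       can_skip_update(cases[i][0], cases[i][1]) ?
		       "skip (migratability unchanged)" :
		       "update pushable list and rt_nr_migratory");
	return 0;
}

Only the {1, 4} and {4, 1} cases fall through to the dequeue/enqueue work;
the old code also re-queued the task on any weight change, e.g. 4 -> 2.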