author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-29 17:46:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-29 17:46:05 -0400
commit		7fda0412c5f7afdd1a5ff518f98dee5157266d8a
tree		d312af46758fa9b59431a479d258b54184a00591 /kernel
parent		6b8212a313dae341ef3a2e413dfec5c4dea59617
parent		160594e99dbbb0a5600ad922c630952c7c1c14bf
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpusets: Remove an unused variable
  sched/rt: Improve pick_next_highest_task_rt()
  sched: Fix select_fallback_rq() vs cpu_active/cpu_online
  sched/x86/smp: Do not enable IRQs over calibrate_delay()
  sched: Fix compiler warning about declared inline after use
  MAINTAINERS: Update email address for SCHEDULER and PERF EVENTS
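The central change in this pull is the select_fallback_rq() rework in the kernel/sched/core.c hunk below: instead of a single cpuset-based fallback, the function now walks a small state machine that keeps widening the candidate mask (task mask, then the cpuset's mask, then cpu_possible_mask) until it finds an online, active CPU, and only BUG()s if even that fails. The following user-space sketch illustrates that widening order on toy bitmask "cpumasks"; the names select_fallback_cpu(), pick_online() and the hex masks are made up for illustration and are not the kernel API.

/*
 * Illustrative user-space sketch, NOT kernel code: toy bitmask "cpumasks"
 * and hypothetical helper names stand in for the real cpumask API.
 */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

enum fallback_state { STATE_CPUSET, STATE_POSSIBLE, STATE_FAIL };

/* First CPU set in both masks, or -1 if the intersection is empty. */
static int pick_online(unsigned int allowed, unsigned int online)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (allowed & online & (1u << cpu))
			return cpu;
	return -1;
}

static int select_fallback_cpu(unsigned int task_mask,
			       unsigned int cpuset_mask,
			       unsigned int possible_mask,
			       unsigned int online_mask)
{
	unsigned int allowed = task_mask;
	enum fallback_state state = STATE_CPUSET;

	for (;;) {
		int cpu = pick_online(allowed, online_mask);

		if (cpu >= 0)
			return cpu;

		switch (state) {
		case STATE_CPUSET:
			/* Retry with whatever the task's cpuset allows. */
			allowed = cpuset_mask;
			state = STATE_POSSIBLE;
			break;
		case STATE_POSSIBLE:
			/* No more Mr. Nice Guy: any possible CPU will do. */
			allowed = possible_mask;
			state = STATE_FAIL;
			break;
		case STATE_FAIL:
			assert(0);	/* the kernel BUG()s at this point */
			return -1;
		}
	}
}

int main(void)
{
	/* Task allows CPUs 2-3, its cpuset allows CPU 3, but only 0-1 are online. */
	printf("fell back to CPU %d\n",
	       select_fallback_cpu(0x0c, 0x08, 0xff, 0x03));
	return 0;
}

In the real hunk, the same progression is driven by the enum { cpuset, possible, fail } state variable, with cpuset_cpus_allowed_fallback() and do_set_cpus_allowed(p, cpu_possible_mask) doing the widening; cpuset_cpus_allowed_fallback() correspondingly no longer needs to return a CPU itself, which is what the kernel/cpuset.c hunks clean up.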
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c		21
-rw-r--r--	kernel/sched/core.c	62
-rw-r--r--	kernel/sched/fair.c	16
-rw-r--r--	kernel/sched/rt.c	2
4 files changed, 59 insertions(+), 42 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1010cc61931f..b96ad75b7e64 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	mutex_unlock(&callback_mutex);
 }
 
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	const struct cpuset *cs;
-	int cpu;
 
 	rcu_read_lock();
 	cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
 	 * set any mask even if it is not right from task_cs() pov,
 	 * the pending set_cpus_allowed_ptr() will fix things.
+	 *
+	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
+	 * if required.
 	 */
-
-	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-	if (cpu >= nr_cpu_ids) {
-		/*
-		 * Either tsk->cpus_allowed is wrong (see above) or it
-		 * is actually empty. The latter case is only possible
-		 * if we are racing with remove_tasks_in_empty_cpuset().
-		 * Like above we can temporary set any mask and rely on
-		 * set_cpus_allowed_ptr() as synchronization point.
-		 */
-		do_set_cpus_allowed(tsk, cpu_possible_mask);
-		cpu = cpumask_any(cpu_active_mask);
-	}
-
-	return cpu;
 }
 
 void cpuset_init_current_mems_allowed(void)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 157fb9b2b186..e3ed0ecee7c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-	int dest_cpu;
 	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+	enum { cpuset, possible, fail } state = cpuset;
+	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+	for_each_cpu_mask(dest_cpu, *nodemask) {
+		if (!cpu_online(dest_cpu))
+			continue;
+		if (!cpu_active(dest_cpu))
+			continue;
 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
+	}
 
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		return dest_cpu;
+	for (;;) {
+		/* Any allowed, online CPU? */
+		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+			if (!cpu_online(dest_cpu))
+				continue;
+			if (!cpu_active(dest_cpu))
+				continue;
+			goto out;
+		}
 
-	/* No more Mr. Nice Guy. */
-	dest_cpu = cpuset_cpus_allowed_fallback(p);
-	/*
-	 * Don't tell them about moving exiting tasks or
-	 * kernel threads (both mm NULL), since they never
-	 * leave kernel.
-	 */
-	if (p->mm && printk_ratelimit()) {
-		printk_sched("process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, cpu);
+		switch (state) {
+		case cpuset:
+			/* No more Mr. Nice Guy. */
+			cpuset_cpus_allowed_fallback(p);
+			state = possible;
+			break;
+
+		case possible:
+			do_set_cpus_allowed(p, cpu_possible_mask);
+			state = fail;
+			break;
+
+		case fail:
+			BUG();
+			break;
+		}
+	}
+
+out:
+	if (state != cpuset) {
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk_sched("process %d (%s) no longer affine to cpu%d\n",
+					task_pid_nr(p), p->comm, cpu);
+		}
 	}
 
 	return dest_cpu;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94340c7544a9..0d97ebdc58f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				     unsigned long delta_exec);
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		__clear_buddies_skip(se);
 }
 
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		resched_task(rq_of(cfs_rq)->curr);
 }
 
-static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-		unsigned long delta_exec)
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
 {
 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-		unsigned long delta_exec) {}
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b60dad720173..44af55e6d5d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 next_idx:
 		if (idx >= MAX_RT_PRIO)
 			continue;
-		if (next && next->prio < idx)
+		if (next && next->prio <= idx)
 			continue;
 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
 			struct task_struct *p;