Diffstat:
-rw-r--r--  MAINTAINERS                |  4
-rw-r--r--  arch/x86/kernel/smpboot.c  |  5
-rw-r--r--  include/linux/cpuset.h     |  6
-rw-r--r--  kernel/cpuset.c            | 21
-rw-r--r--  kernel/sched/core.c        | 62
-rw-r--r--  kernel/sched/fair.c        | 16
-rw-r--r--  kernel/sched/rt.c          |  2
7 files changed, 63 insertions, 53 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index f9faadef7ab7..64168f6dd89e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5185,7 +5185,7 @@ F:	kernel/delayacct.c
 PERFORMANCE EVENTS SUBSYSTEM
 M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
-M:	Ingo Molnar <mingo@elte.hu>
+M:	Ingo Molnar <mingo@redhat.com>
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Supported
@@ -5833,7 +5833,7 @@ S:	Maintained
 F:	drivers/watchdog/sc1200wdt.c
 
 SCHEDULER
-M:	Ingo Molnar <mingo@elte.hu>
+M:	Ingo Molnar <mingo@redhat.com>
 M:	Peter Zijlstra <peterz@infradead.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:	Maintained
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5104a2b685cf..ce13315d48fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -219,14 +219,9 @@ static void __cpuinit smp_callin(void)
 	 * Update loops_per_jiffy in cpu_data. Previous call to
 	 * smp_store_cpu_info() stored a value that is close but not as
 	 * accurate as the value just calculated.
-	 *
-	 * Need to enable IRQs because it can take longer and then
-	 * the NMI watchdog might kill us.
 	 */
-	local_irq_enable();
 	calibrate_delay();
 	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
-	local_irq_disable();
 	pr_debug("Stack at about %p\n", &cpuid);
 
 	/*
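The comment deleted in this hunk documented why the IRQ window existed: with interrupts off, a calibration loop long enough to matter looks like a hard lockup to the NMI watchdog. The hunk removes the window outright, presumably because the calibration path is no longer slow enough to trip it. As an aside (not part of this patch), the usual idiom when a long IRQs-off loop is unavoidable is to poke the watchdog from inside the loop rather than open an interrupt window; a sketch, with a made-up loop body and poll interval:

#include <linux/nmi.h>		/* touch_nmi_watchdog() */
#include <asm/processor.h>	/* cpu_relax() */

static void long_irqs_off_loop(void)
{
	unsigned long i;

	for (i = 0; i < (1UL << 24); i++) {
		cpu_relax();			/* placeholder busy work */
		if (!(i & 0xffff))
			touch_nmi_watchdog();	/* tell the watchdog we are alive */
	}
}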
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7a7e5fd2a277..668f66baac7b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -135,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, cpu_possible_mask);
 }
 
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	do_set_cpus_allowed(p, cpu_possible_mask);
-	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1010cc61931f..b96ad75b7e64 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	mutex_unlock(&callback_mutex);
 }
 
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	const struct cpuset *cs;
-	int cpu;
 
 	rcu_read_lock();
 	cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
 	 * set any mask even if it is not right from task_cs() pov,
 	 * the pending set_cpus_allowed_ptr() will fix things.
+	 *
+	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
+	 * if required.
 	 */
-
-	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-	if (cpu >= nr_cpu_ids) {
-		/*
-		 * Either tsk->cpus_allowed is wrong (see above) or it
-		 * is actually empty. The latter case is only possible
-		 * if we are racing with remove_tasks_in_empty_cpuset().
-		 * Like above we can temporary set any mask and rely on
-		 * set_cpus_allowed_ptr() as synchronization point.
-		 */
-		do_set_cpus_allowed(tsk, cpu_possible_mask);
-		cpu = cpumask_any(cpu_active_mask);
-	}
-
-	return cpu;
 }
 
 void cpuset_init_current_mems_allowed(void)
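Together with the include/linux/cpuset.h change above, this narrows the helper's contract: cpuset_cpus_allowed_fallback() now only re-applies the task's cpuset mask and returns nothing; repairing a stale or empty mask is left to the caller. A simplified sketch of the patched function's shape (illustrative, with the body reduced to its net effect):

void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
	do_set_cpus_allowed(tsk, task_cs(tsk)->cpus_allowed);
	rcu_read_unlock();

	/*
	 * No CPU is chosen here. If the mask is stale or empty,
	 * select_fallback_rq() widens it to cpu_possible_mask and
	 * retries -- see the kernel/sched/core.c hunk below.
	 */
}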
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 157fb9b2b186..e3ed0ecee7c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-	int dest_cpu;
 	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+	enum { cpuset, possible, fail } state = cpuset;
+	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+	for_each_cpu_mask(dest_cpu, *nodemask) {
+		if (!cpu_online(dest_cpu))
+			continue;
+		if (!cpu_active(dest_cpu))
+			continue;
 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
+	}
 
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		return dest_cpu;
+	for (;;) {
+		/* Any allowed, online CPU? */
+		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+			if (!cpu_online(dest_cpu))
+				continue;
+			if (!cpu_active(dest_cpu))
+				continue;
+			goto out;
+		}
 
-	/* No more Mr. Nice Guy. */
-	dest_cpu = cpuset_cpus_allowed_fallback(p);
-	/*
-	 * Don't tell them about moving exiting tasks or
-	 * kernel threads (both mm NULL), since they never
-	 * leave kernel.
-	 */
-	if (p->mm && printk_ratelimit()) {
-		printk_sched("process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, cpu);
+		switch (state) {
+		case cpuset:
+			/* No more Mr. Nice Guy. */
+			cpuset_cpus_allowed_fallback(p);
+			state = possible;
+			break;
+
+		case possible:
+			do_set_cpus_allowed(p, cpu_possible_mask);
+			state = fail;
+			break;
+
+		case fail:
+			BUG();
+			break;
+		}
+	}
+
+out:
+	if (state != cpuset) {
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk_sched("process %d (%s) no longer affine to cpu%d\n",
+					task_pid_nr(p), p->comm, cpu);
+		}
 	}
 
 	return dest_cpu;
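The rewrite replaces two one-shot mask lookups with an escalation loop: try the task's own mask, then the cpuset fallback mask, then cpu_possible_mask, and treat failure after that as a kernel bug. A standalone userspace model of that state machine (hypothetical names and masks; the real code walks cpumasks and uses BUG() where this uses abort()):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NR_CPUS 8

static bool usable[NR_CPUS] = { [2] = true };	/* online && active CPUs */
static bool allowed[NR_CPUS];			/* task affinity; empty == stale */

static int pick_cpu(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (allowed[cpu] && usable[cpu])
			return cpu;
	return -1;				/* no allowed, usable CPU */
}

int main(void)
{
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	for (;;) {
		dest_cpu = pick_cpu();
		if (dest_cpu >= 0)
			break;

		switch (state) {
		case cpuset:
			/* First escalation: re-apply the cpuset mask
			 * (kernel: cpuset_cpus_allowed_fallback(p));
			 * modelled as a no-op here. */
			state = possible;
			break;
		case possible:
			/* Second escalation: allow every possible CPU. */
			for (int cpu = 0; cpu < NR_CPUS; cpu++)
				allowed[cpu] = true;
			state = fail;
			break;
		case fail:
			abort();	/* kernel: BUG() */
		}
	}

	printf("fell back to cpu%d\n", dest_cpu);
	return 0;
}

Note also that the "no longer affine" message is now guarded by state != cpuset, so it is printed only when the task's affinity mask actually had to be widened.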
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94340c7544a9..0d97ebdc58f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				   unsigned long delta_exec);
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	__clear_buddies_skip(se);
 }
 
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		resched_task(rq_of(cfs_rq)->curr);
 }
 
-static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-		unsigned long delta_exec)
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
 {
 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				   unsigned long delta_exec) {}
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
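These fair.c hunks all do one thing: make every declaration of account_cfs_rq_runtime() and return_cfs_rq_runtime(), including the !CONFIG_CFS_BANDWIDTH stubs, carry the same __always_inline attribute as the eventual definition. Mixing a plain forward declaration with a later inline definition is what older GCC complains about, roughly "'account_cfs_rq_runtime' declared inline after being called". A minimal standalone reproduction of the fixed pattern, using a userspace stand-in for the kernel's __always_inline macro (assumed expansion):

/* Stand-in for the kernel's __always_inline (assumed expansion). */
#define __always_inline inline __attribute__((always_inline))

/* The declaration carries the same attributes as the definition... */
static __always_inline void helper(int x);

static int caller(void)
{
	helper(1);	/* ...so this call site is already inlinable */
	return 0;
}

static __always_inline void helper(int x)
{
	(void)x;	/* placeholder body */
}

int main(void)
{
	return caller();
}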
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b60dad720173..44af55e6d5d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 next_idx:
 		if (idx >= MAX_RT_PRIO)
 			continue;
-		if (next && next->prio < idx)
+		if (next && next->prio <= idx)
 			continue;
 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
 			struct task_struct *p;
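The rt.c change is a one-character scan optimization in pick_next_highest_task_rt(). RT priority values are inverted (0 is the highest priority) and the bitmap scan visits indexes from highest priority downward, so once a candidate next with next->prio <= idx exists, the queue at idx cannot yield a strictly better task; the old < still walked equal-priority queues for nothing. The comparison restated as a tiny standalone helper (hypothetical name, illustrative only):

#include <stdbool.h>

/*
 * Lower value == higher RT priority. The queue at index `idx` can only
 * beat the current candidate when idx is strictly below the candidate's
 * priority value; an equal-priority queue never wins, so skip it too.
 */
static bool should_skip_queue(int next_prio, int idx)
{
	return next_prio <= idx;
}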