commit ee58b57100ca953da7320c285315a95db2f7053d
tree   77b815a31240adc4d6326346908137fc6c2c3a96 /kernel/sched/core.c
parent 6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e
parent e7bdea7750eb2a64aea4a08fa5c0a31719c8155d
author David S. Miller <davem@davemloft.net> 2016-06-30 05:03:36 -0400
committer David S. Miller <davem@davemloft.net> 2016-06-30 05:03:36 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, except the packet scheduler
conflicts which deal with the addition of the free list parameter
to qdisc_enqueue().
Signed-off-by: David S. Miller <davem@davemloft.net>
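
The qdisc_enqueue() conflict mentioned above revolves around a deferred-free pattern: on the net side, enqueue handlers stopped freeing dropped packets inline and instead chain them onto a caller-supplied list that is flushed only after the qdisc lock has been released. A minimal userspace sketch of that pattern (the packet struct, defer_free() and the toy enqueue() are illustrative names, not the kernel's API):

/* Sketch only: models the "free list" parameter pattern, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	int id;
	struct packet *next;
};

/* Drop a packet by chaining it onto *to_free instead of freeing it now. */
static void defer_free(struct packet *pkt, struct packet **to_free)
{
	pkt->next = *to_free;
	*to_free = pkt;
}

/* Toy enqueue: reject every packet once the queue holds 'limit' entries. */
static int enqueue(struct packet *pkt, int *qlen, int limit,
		   struct packet **to_free)
{
	if (*qlen >= limit) {
		defer_free(pkt, to_free);	/* dropped, freed later */
		return -1;
	}
	(*qlen)++;				/* accepted (queue elided) */
	return 0;
}

int main(void)
{
	struct packet *to_free = NULL;
	int qlen = 0;

	/* the real code holds the qdisc lock across this loop */
	for (int i = 0; i < 4; i++) {
		struct packet *pkt = malloc(sizeof(*pkt));
		pkt->id = i;
		if (enqueue(pkt, &qlen, 2, &to_free))
			printf("packet %d dropped\n", pkt->id);
	}
	/* lock released; only now free what was dropped under it */
	while (to_free) {
		struct packet *next = to_free->next;
		free(to_free);
		to_free = next;
	}
	return 0;
}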
Diffstat (limited to 'kernel/sched/core.c')

 kernel/sched/core.c | 42 +++++++++++++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f2cae4620c7..51d7105f529a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-			if (!cpu_active(dest_cpu))
+			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+				continue;
+			if (!cpu_online(dest_cpu))
 				continue;
 			goto out;
 		}
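
The hunk above relaxes fallback selection: kernel threads (PF_KTHREAD) may land on any online CPU, while ordinary tasks still require a CPU that is also active. A hedged userspace model of the two-mask check, with online/active as plain bitmasks (NR_CPUS and the mask values are arbitrary here):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpu_online(unsigned long online, int cpu) { return online & (1UL << cpu); }
static bool cpu_active(unsigned long active, int cpu) { return active & (1UL << cpu); }

/* Returns the first acceptable CPU, or -1 if none qualifies. */
static int select_fallback(unsigned long online, unsigned long active,
			   bool is_kthread)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!is_kthread && !cpu_active(active, cpu))
			continue;	/* user tasks need an active CPU */
		if (!cpu_online(online, cpu))
			continue;	/* everyone needs an online CPU */
		return cpu;
	}
	return -1;
}

int main(void)
{
	unsigned long online = 0xb;	/* CPUs 0,1,3 online */
	unsigned long active = 0xa;	/* CPUs 1,3 active; CPU 0 still coming up */

	printf("user task -> CPU %d\n", select_fallback(online, active, false));
	printf("kthread   -> CPU %d\n", select_fallback(online, active, true));
	return 0;
}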
@@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
 	if (enabled)
@@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str)
 	if (!str)
 		goto out;
 
+	/*
+	 * This code is called before jump labels have been set up, so we can't
+	 * change the static branch directly just yet.  Instead set a temporary
+	 * variable so init_schedstats() can do it later.
+	 */
 	if (!strcmp(str, "enable")) {
-		set_schedstats(true);
+		__sched_schedstats = true;
 		ret = 1;
 	} else if (!strcmp(str, "disable")) {
-		set_schedstats(false);
+		__sched_schedstats = false;
 		ret = 1;
 	}
 out:
@@ -2293,6 +2302,11 @@ out:
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+	set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos)
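
The two schedstats hunks above implement a two-phase bring-up: setup_schedstats() runs from early parameter parsing, before the jump-label machinery is ready, so it only stages the request in an __initdata bool; init_schedstats() applies it later. A minimal sketch of the same pattern, with the static key simulated by a plain bool:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool sched_schedstats;		/* stands in for the static key */
static bool __sched_schedstats;		/* early-boot staging variable  */

static void set_schedstats(bool enabled)
{
	/* In the kernel this flips a jump label; here it is a plain store. */
	sched_schedstats = enabled;
}

/* Phase 1: runs too early to touch the (simulated) static key. */
static int setup_schedstats(const char *str)
{
	if (!strcmp(str, "enable"))
		__sched_schedstats = true;
	else if (!strcmp(str, "disable"))
		__sched_schedstats = false;
	else
		return 0;
	return 1;
}

/* Phase 2: static-key machinery is ready, apply the staged value. */
static void init_schedstats(void)
{
	set_schedstats(__sched_schedstats);
}

int main(void)
{
	setup_schedstats("enable");	/* e.g. schedstats=enable on the cmdline */
	init_schedstats();
	printf("schedstats %s\n", sched_schedstats ? "on" : "off");
	return 0;
}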
@@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 	set_schedstats(state);
 	return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
 
 /*
  * fork()/clone()-time setup:
@@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p)
 	 */
 	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
-	/* Post initialize new task's util average when its cfs_rq is set */
+	rq = __task_rq_lock(p, &rf);
 	post_init_entity_util_avg(&p->se);
 
-	rq = __task_rq_lock(p, &rf);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
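
The reordering above takes the runqueue lock before post-initializing the new task's utilization average, so the cfs_rq state that initialization reads cannot change underneath it. A loose userspace analogue (a mutex-guarded toy runqueue; the averaging formula is purely illustrative):

#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int nr_running;
	long total_util;	/* toy stand-in for cfs_rq load tracking */
};

struct task { long util; };

static void wake_up_new_task(struct rq *rq, struct task *p)
{
	pthread_mutex_lock(&rq->lock);
	p->util = rq->total_util / (rq->nr_running + 1);	/* init under lock */
	rq->total_util += p->util;
	rq->nr_running++;		/* activate_task() equivalent */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct rq rq = { PTHREAD_MUTEX_INITIALIZER, 1, 1024 };
	struct task t;

	wake_up_new_task(&rq, &t);
	printf("new task util %ld, nr_running %d\n", t.util, rq.nr_running);
	return 0;
}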
@@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-	BUG_ON(task_stack_end_corrupted(prev));
+	if (task_stack_end_corrupted(prev))
+		panic("corrupted stack end detected inside scheduler\n");
 #endif
 
 	if (unlikely(in_atomic_preempt_off())) {
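
Replacing BUG_ON() with panic() matters here because BUG() tries to kill the current task and reschedule, re-entering the very scheduler whose stack was just found corrupted; panic() halts immediately instead. A userspace model of the canary check, with abort() standing in for panic() and the magic value borrowed for illustration:

#include <stdio.h>
#include <stdlib.h>

#define STACK_END_MAGIC 0x57AC6E9DUL

struct task {
	unsigned long stack_canary;	/* magic word at the stack's end */
};

static void schedule_debug(const struct task *prev)
{
	if (prev->stack_canary != STACK_END_MAGIC) {
		/* Do not try to recover through the scheduler itself. */
		fprintf(stderr, "corrupted stack end detected inside scheduler\n");
		abort();	/* stands in for panic() */
	}
}

int main(void)
{
	struct task t = { .stack_canary = STACK_END_MAGIC };
	schedule_debug(&t);		/* passes silently */
	t.stack_canary = 0;		/* simulate an overflow */
	schedule_debug(&t);		/* aborts */
	return 0;
}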
@@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
 		 * console might take a lot of time:
+		 * Also, reset softlockup watchdogs on all CPUs, because
+		 * another CPU might be blocked waiting for us to process
+		 * an IPI.
 		 */
 		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
 		if (!state_filter || (p->state & state_filter))
 			sched_show_task(p);
 	}
 
-	touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
 	if (!state_filter)
 		sysrq_sched_debug_show();
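
The hunk above moves touch_all_softlockup_watchdogs() inside the per-task loop: while this CPU slowly dumps tasks to the console, other CPUs may be spinning on an IPI it has not yet handled, so their softlockup detectors must be kept quiet on every iteration, not once at the end. A toy model of the refresh, assuming per-CPU "last seen alive" timestamps:

#include <stdio.h>
#include <time.h>

#define NR_CPUS 4

static time_t watchdog_ts[NR_CPUS];

static void touch_all_softlockup_watchdogs(void)
{
	time_t now = time(NULL);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		watchdog_ts[cpu] = now;	/* pretend each CPU just made progress */
}

int main(void)
{
	for (int task = 0; task < 3; task++) {
		touch_all_softlockup_watchdogs();	/* refresh every iteration */
		printf("dumping task %d (slow console write...)\n", task);
	}
	return 0;
}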
@@ -7487,6 +7505,8 @@ void __init sched_init(void)
 #endif
 	init_sched_fair_class();
 
+	init_schedstats();
+
 	scheduler_running = 1;
 }
 