Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 30
1 file changed, 16 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 34f94240642f..168b2680ae27 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -240,7 +240,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
 		delta = ktime_to_ns(ktime_sub(hard, soft));
 		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
-				HRTIMER_MODE_ABS, 0);
+				HRTIMER_MODE_ABS_PINNED, 0);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1155,7 +1155,7 @@ static __init void init_hrtick(void)
 static void hrtick_start(struct rq *rq, u64 delay)
 {
 	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-			HRTIMER_MODE_REL, 0);
+			HRTIMER_MODE_REL_PINNED, 0);
 }
 
 static inline void init_hrtick(void)
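The two hunks above change only the timer mode: the rt-period timer and the hrtick timer are per-CPU timers, so they are now started pinned and the timer-migration logic (see sysctl_timer_migration at the end of this diff) will never move them off their CPU. For orientation, a sketch of the mode flags this relies on, written from memory of the 2.6.31-era include/linux/hrtimer.h rather than taken from this diff:

/*
 * Illustrative copy of the hrtimer mode flags assumed above: the
 * *_PINNED variants OR in a "pinned" bit that tells the hrtimer core
 * to keep the timer on the CPU that armed it instead of migrating it
 * to a busier CPU when the local CPU is idle.
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS	= 0x0,	/* expiry time is absolute */
	HRTIMER_MODE_REL	= 0x1,	/* expiry time is relative to now */
	HRTIMER_MODE_PINNED	= 0x2,	/* timer stays on the arming CPU */
	HRTIMER_MODE_ABS_PINNED	= 0x2,
	HRTIMER_MODE_REL_PINNED	= 0x3,
};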
@@ -1978,7 +1978,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_counter_task_migration(p, new_cpu);
+		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
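Task-migration counting now goes through the generic software-counter path instead of a dedicated perf hook. A hedged sketch of what the new call amounts to; the wrapper name below is invented, and the parameter meanings are assumptions based on the perf_counter API of this tree:

#include <linux/perf_counter.h>

/*
 * Illustrative wrapper only (not from this patch): records one
 * PERF_COUNT_SW_CPU_MIGRATIONS software event.  The arguments are
 * assumed to be (event, nr, nmi, regs, addr), i.e. a count of 1,
 * no register snapshot, no address.
 */
static inline void count_one_migration(void)
{
	perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}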
@@ -4420,6 +4421,11 @@ static struct {
 	.load_balancer = ATOMIC_INIT(-1),
 };
 
+int get_nohz_load_balancer(void)
+{
+	return atomic_read(&nohz.load_balancer);
+}
+
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 /**
  * lowest_flag_domain - Return lowest sched_domain containing flag.
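The new get_nohz_load_balancer() simply publishes which CPU is currently acting as the NO_HZ idle load balancer, or -1 if none is nominated. A hedged sketch of the kind of consumer this accessor is meant for; the function name is invented here, and the real consumer is assumed to be the timer-migration code elsewhere in this patch series:

#include <linux/sched.h>

/*
 * Illustrative only: when picking a CPU for deferred work from an
 * idle CPU, prefer the CPU that stays awake as the nohz load
 * balancer, so the idle CPU is not woken just to service the expiry.
 */
static int pick_target_cpu(int this_cpu)
{
	if (idle_cpu(this_cpu)) {
		int balancer = get_nohz_load_balancer();

		if (balancer >= 0)
			return balancer;
	}
	return this_cpu;
}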
@@ -7078,7 +7084,7 @@ static int migration_thread(void *data)
 
 		if (cpu_is_offline(cpu)) {
 			spin_unlock_irq(&rq->lock);
-			goto wait_to_die;
+			break;
 		}
 
 		if (rq->active_balance) {
@@ -7104,16 +7110,7 @@ static int migration_thread(void *data)
 		complete(&req->done);
 	}
 	__set_current_state(TASK_RUNNING);
-	return 0;
 
-wait_to_die:
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
 	return 0;
 }
 
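With the two migration_thread() hunks above, the thread no longer parks itself in a private wait_to_die loop once its CPU goes offline; it just breaks out of the main loop and returns. This relies on kthread_stop() being able to reap a thread that has already exited, provided the caller still holds a reference on its task_struct, which is exactly what the get_task_struct()/put_task_struct() hunks further down add. A minimal sketch of the resulting shape, generic rather than the scheduler's actual loop:

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Generic per-CPU kthread after the simplification sketched above. */
static int example_thread(void *data)
{
	int cpu = (long)data;

	while (!kthread_should_stop()) {
		if (cpu_is_offline(cpu))
			break;	/* just return; no wait_to_die parking loop */
		/* ... do one unit of per-CPU work, then sleep ... */
		schedule();
	}
	return 0;
}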
@@ -7527,6 +7524,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = task_rq_lock(p, &flags);
 		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 		task_rq_unlock(rq, &flags);
+		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
 		break;
 
@@ -7557,6 +7555,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     cpumask_any(cpu_online_mask));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
+		put_task_struct(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;
 
@@ -7566,6 +7565,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
+		put_task_struct(rq->migration_thread);
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		spin_lock_irq(&rq->lock);
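The three migration_call() hunks above pin the migration thread's task_struct for as long as the notifier may still call kthread_stop() on it. Because the thread can now exit on its own (the break above), kthread_stop() could otherwise race with the task_struct being freed. A hedged sketch of the ownership pattern, using invented names rather than the scheduler's code:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;	/* hypothetical per-CPU thread */

static int worker_up(int (*fn)(void *), void *arg, int cpu)
{
	struct task_struct *p = kthread_create(fn, arg, "worker/%d", cpu);

	if (IS_ERR(p))
		return PTR_ERR(p);
	get_task_struct(p);	/* pin: p may exit before we stop it */
	worker = p;
	wake_up_process(p);
	return 0;
}

static void worker_down(void)
{
	kthread_stop(worker);		/* reaps the thread even if it already returned */
	put_task_struct(worker);	/* drop our pin; task_struct may now be freed */
	worker = NULL;
}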
@@ -7861,7 +7861,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		free_rootdomain(old_rd);
 }
 
-static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	gfp_t gfp = GFP_KERNEL;
 
@@ -9067,6 +9067,8 @@ void __init sched_init_smp(void)
 }
 #endif /* CONFIG_SMP */
 
+const_debug unsigned int sysctl_timer_migration = 1;
+
 int in_sched_functions(unsigned long addr)
 {
 	return in_lock_functions(addr) ||
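The new sysctl_timer_migration flag (default 1) is the global switch for moving unpinned timers away from idle CPUs; the pinned hrtimer modes at the top of this diff exist precisely so the scheduler's own per-CPU timers are never affected by it. The knob itself is wired up outside this file. A hedged sketch of what that wiring is assumed to look like in kernel/sysctl.c, exposing it as /proc/sys/kernel/timer_migration; the field values here are assumptions, not taken from this diff, and writability presumes scheduler debugging is enabled, since const_debug otherwise makes the variable const:

#include <linux/sysctl.h>

extern unsigned int sysctl_timer_migration;

/* Assumed companion sysctl entry, not part of this diff. */
static struct ctl_table timer_migration_ctl[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};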