Diffstat (limited to 'kernel'):

 -rw-r--r--  kernel/bounds.c            |    4
 -rw-r--r--  kernel/context_tracking.c  |    2
 -rw-r--r--  kernel/cpu.c               |   17
 -rw-r--r--  kernel/cpu/idle.c          |   16
 -rw-r--r--  kernel/fork.c              |    5
 -rw-r--r--  kernel/rcutree.c           |   15
 -rw-r--r--  kernel/sched/core.c        |  290
 -rw-r--r--  kernel/sched/debug.c       |   68
 -rw-r--r--  kernel/sched/fair.c        | 1359
 -rw-r--r--  kernel/sched/features.h    |   19
 -rw-r--r--  kernel/sched/idle_task.c   |    2
 -rw-r--r--  kernel/sched/rt.c          |   22
 -rw-r--r--  kernel/sched/sched.h       |   52
 -rw-r--r--  kernel/sched/stats.h       |   46
 -rw-r--r--  kernel/sched/stop_task.c   |    2
 -rw-r--r--  kernel/softirq.c           |   16
 -rw-r--r--  kernel/stop_machine.c      |  288
 -rw-r--r--  kernel/sysctl.c            |   21
 -rw-r--r--  kernel/timer.c             |    8
 -rw-r--r--  kernel/wait.c              |   24
 20 files changed, 1860 insertions, 416 deletions
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 0c9b862292b2..e8ca97b5c386 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/mmzone.h> | 10 | #include <linux/mmzone.h> |
11 | #include <linux/kbuild.h> | 11 | #include <linux/kbuild.h> |
12 | #include <linux/page_cgroup.h> | 12 | #include <linux/page_cgroup.h> |
13 | #include <linux/log2.h> | ||
13 | 14 | ||
14 | void foo(void) | 15 | void foo(void) |
15 | { | 16 | { |
@@ -17,5 +18,8 @@ void foo(void) | |||
17 | DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); | 18 | DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); |
18 | DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES); | 19 | DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES); |
19 | DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS); | 20 | DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS); |
21 | #ifdef CONFIG_SMP | ||
22 | DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); | ||
23 | #endif | ||
20 | /* End of constants */ | 24 | /* End of constants */ |
21 | } | 25 | } |
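kernel/bounds.c exists so kbuild can evaluate C expressions at build time and emit them as plain constants in the generated bounds header; the new entry exposes ilog2(CONFIG_NR_CPUS) that way. A minimal userspace sketch of the arithmetic, with ilog2_sketch() standing in for the kernel's <linux/log2.h> helper and 256 CPUs chosen purely for illustration:

/* floor(log2(n)): the value NR_CPUS_BITS would take for a given NR_CPUS */
#include <stdio.h>

static unsigned int ilog2_sketch(unsigned long n)
{
	unsigned int bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	printf("NR_CPUS_BITS for 256 CPUs: %u\n", ilog2_sketch(256)); /* 8 */
	return 0;
}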
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 859c8dfd78a1..e5f3917aa05b 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void) | |||
120 | * instead of preempt_schedule() to exit user context if needed before | 120 | * instead of preempt_schedule() to exit user context if needed before |
121 | * calling the scheduler. | 121 | * calling the scheduler. |
122 | */ | 122 | */ |
123 | void __sched notrace preempt_schedule_context(void) | 123 | asmlinkage void __sched notrace preempt_schedule_context(void) |
124 | { | 124 | { |
125 | enum ctx_state prev_ctx; | 125 | enum ctx_state prev_ctx; |
126 | 126 | ||
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d7f07a2da5a6..63aa50d7ce1e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -308,6 +308,23 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
308 | } | 308 | } |
309 | smpboot_park_threads(cpu); | 309 | smpboot_park_threads(cpu); |
310 | 310 | ||
311 | /* | ||
312 | * By now we've cleared cpu_active_mask, wait for all preempt-disabled | ||
313 | * and RCU users of this state to go away such that all new such users | ||
314 | * will observe it. | ||
315 | * | ||
316 | * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might | ||
317 | * not imply sync_sched(), so explicitly call both. | ||
318 | */ | ||
319 | #ifdef CONFIG_PREEMPT | ||
320 | synchronize_sched(); | ||
321 | #endif | ||
322 | synchronize_rcu(); | ||
323 | |||
324 | /* | ||
325 | * So now all preempt/rcu users must observe !cpu_active(). | ||
326 | */ | ||
327 | |||
311 | err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); | 328 | err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); |
312 | if (err) { | 329 | if (err) { |
313 | /* CPU didn't die: tell everyone. Can't complain. */ | 330 | /* CPU didn't die: tell everyone. Can't complain. */ |
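The synchronize_sched()/synchronize_rcu() pair added above matters because, under CONFIG_PREEMPT, the two read-side flavours are distinct: preempt-disabled regions are only waited out by synchronize_sched(), while rcu_read_lock() sections are only waited out by synchronize_rcu(). A sketch of the two reader styles the comment has in mind (illustrative kernel-style snippets, not code from this patch):

/* covered by synchronize_sched(): preemption-disabled readers */
static int cpu_seen_active_sched(int cpu)
{
	int active;

	preempt_disable();
	active = cpu_active(cpu);
	preempt_enable();

	return active;
}

/* covered by synchronize_rcu(): preemptible-RCU readers */
static int cpu_seen_active_rcu(int cpu)
{
	int active;

	rcu_read_lock();
	active = cpu_active(cpu);
	rcu_read_unlock();

	return active;
}

Once both grace periods have elapsed, any new critical section of either kind must observe the CPU as already cleared from cpu_active_mask.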
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index e695c0a0bcb5..988573a9a387 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void) | |||
44 | rcu_idle_enter(); | 44 | rcu_idle_enter(); |
45 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | 45 | trace_cpu_idle_rcuidle(0, smp_processor_id()); |
46 | local_irq_enable(); | 46 | local_irq_enable(); |
47 | while (!need_resched()) | 47 | while (!tif_need_resched()) |
48 | cpu_relax(); | 48 | cpu_relax(); |
49 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | 49 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
50 | rcu_idle_exit(); | 50 | rcu_idle_exit(); |
@@ -92,8 +92,7 @@ static void cpu_idle_loop(void) | |||
92 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | 92 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { |
93 | cpu_idle_poll(); | 93 | cpu_idle_poll(); |
94 | } else { | 94 | } else { |
95 | current_clr_polling(); | 95 | if (!current_clr_polling_and_test()) { |
96 | if (!need_resched()) { | ||
97 | stop_critical_timings(); | 96 | stop_critical_timings(); |
98 | rcu_idle_enter(); | 97 | rcu_idle_enter(); |
99 | arch_cpu_idle(); | 98 | arch_cpu_idle(); |
@@ -103,9 +102,16 @@ static void cpu_idle_loop(void) | |||
103 | } else { | 102 | } else { |
104 | local_irq_enable(); | 103 | local_irq_enable(); |
105 | } | 104 | } |
106 | current_set_polling(); | 105 | __current_set_polling(); |
107 | } | 106 | } |
108 | arch_cpu_idle_exit(); | 107 | arch_cpu_idle_exit(); |
108 | /* | ||
109 | * We need to test and propagate the TIF_NEED_RESCHED | ||
110 | * bit here because we might not have sent the | ||
111 | * reschedule IPI to idle tasks. | ||
112 | */ | ||
113 | if (tif_need_resched()) | ||
114 | set_preempt_need_resched(); | ||
109 | } | 115 | } |
110 | tick_nohz_idle_exit(); | 116 | tick_nohz_idle_exit(); |
111 | schedule_preempt_disabled(); | 117 | schedule_preempt_disabled(); |
@@ -129,7 +135,7 @@ void cpu_startup_entry(enum cpuhp_state state) | |||
129 | */ | 135 | */ |
130 | boot_init_stack_canary(); | 136 | boot_init_stack_canary(); |
131 | #endif | 137 | #endif |
132 | current_set_polling(); | 138 | __current_set_polling(); |
133 | arch_cpu_idle_prepare(); | 139 | arch_cpu_idle_prepare(); |
134 | cpu_idle_loop(); | 140 | cpu_idle_loop(); |
135 | } | 141 | } |
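The idle-loop rework above folds the old clear-polling-then-check pair into current_clr_polling_and_test(), and re-checks TIF_NEED_RESCHED after arch_cpu_idle_exit() because a remote waker may legitimately skip the reschedule IPI for a CPU it saw polling. A minimal userspace sketch of the clear-then-test idea (assumed semantics, stdatomic stand-ins rather than the real thread_info helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED   (1u << 0)
#define TIF_POLLING_NRFLAG (1u << 1)

static _Atomic unsigned int thread_flags = TIF_POLLING_NRFLAG;

static bool clr_polling_and_test(void)
{
	/* the atomic RMW orders the clear against the re-read below */
	atomic_fetch_and(&thread_flags, ~TIF_POLLING_NRFLAG);
	return atomic_load(&thread_flags) & TIF_NEED_RESCHED;
}

int main(void)
{
	/* a remote CPU that saw us polling only sets the flag, no IPI */
	atomic_fetch_or(&thread_flags, TIF_NEED_RESCHED);

	printf("must reschedule instead of idling: %d\n",
	       clr_polling_and_test()); /* 1 */
	return 0;
}

If the clear and the re-test were not ordered like this, the CPU could drop its polling flag and enter a non-polling idle state just after a waker decided no IPI was needed.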
diff --git a/kernel/fork.c b/kernel/fork.c
index 086fe73ad6bd..c93be06dee87 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,9 +817,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk) | |||
817 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 817 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
818 | mm->pmd_huge_pte = NULL; | 818 | mm->pmd_huge_pte = NULL; |
819 | #endif | 819 | #endif |
820 | #ifdef CONFIG_NUMA_BALANCING | ||
821 | mm->first_nid = NUMA_PTE_SCAN_INIT; | ||
822 | #endif | ||
823 | if (!mm_init(mm, tsk)) | 820 | if (!mm_init(mm, tsk)) |
824 | goto fail_nomem; | 821 | goto fail_nomem; |
825 | 822 | ||
@@ -1313,7 +1310,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1313 | #endif | 1310 | #endif |
1314 | 1311 | ||
1315 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1312 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1316 | sched_fork(p); | 1313 | sched_fork(clone_flags, p); |
1317 | 1314 | ||
1318 | retval = perf_event_init_task(p); | 1315 | retval = perf_event_init_task(p); |
1319 | if (retval) | 1316 | if (retval) |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3fe4e6..1dc9f3604ad8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -898,6 +898,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
898 | force_quiescent_state(rsp); /* Kick them all. */ | 898 | force_quiescent_state(rsp); /* Kick them all. */ |
899 | } | 899 | } |
900 | 900 | ||
901 | /* | ||
902 | * This function really isn't for public consumption, but RCU is special in | ||
903 | * that context switches can allow the state machine to make progress. | ||
904 | */ | ||
905 | extern void resched_cpu(int cpu); | ||
906 | |||
901 | static void print_cpu_stall(struct rcu_state *rsp) | 907 | static void print_cpu_stall(struct rcu_state *rsp) |
902 | { | 908 | { |
903 | int cpu; | 909 | int cpu; |
@@ -927,7 +933,14 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
927 | 3 * rcu_jiffies_till_stall_check() + 3; | 933 | 3 * rcu_jiffies_till_stall_check() + 3; |
928 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 934 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
929 | 935 | ||
930 | set_need_resched(); /* kick ourselves to get things going. */ | 936 | /* |
937 | * Attempt to revive the RCU machinery by forcing a context switch. | ||
938 | * | ||
939 | * A context switch would normally allow the RCU state machine to make | ||
940 | * progress and it could be we're stuck in kernel space without context | ||
941 | * switches for an entirely unreasonable amount of time. | ||
942 | */ | ||
943 | resched_cpu(smp_processor_id()); | ||
931 | } | 944 | } |
932 | 945 | ||
933 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | 946 | static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5ac63c9a995a..450a34b2a637 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -513,12 +513,11 @@ static inline void init_hrtick(void) | |||
513 | * might also involve a cross-CPU call to trigger the scheduler on | 513 | * might also involve a cross-CPU call to trigger the scheduler on |
514 | * the target CPU. | 514 | * the target CPU. |
515 | */ | 515 | */ |
516 | #ifdef CONFIG_SMP | ||
517 | void resched_task(struct task_struct *p) | 516 | void resched_task(struct task_struct *p) |
518 | { | 517 | { |
519 | int cpu; | 518 | int cpu; |
520 | 519 | ||
521 | assert_raw_spin_locked(&task_rq(p)->lock); | 520 | lockdep_assert_held(&task_rq(p)->lock); |
522 | 521 | ||
523 | if (test_tsk_need_resched(p)) | 522 | if (test_tsk_need_resched(p)) |
524 | return; | 523 | return; |
@@ -526,8 +525,10 @@ void resched_task(struct task_struct *p) | |||
526 | set_tsk_need_resched(p); | 525 | set_tsk_need_resched(p); |
527 | 526 | ||
528 | cpu = task_cpu(p); | 527 | cpu = task_cpu(p); |
529 | if (cpu == smp_processor_id()) | 528 | if (cpu == smp_processor_id()) { |
529 | set_preempt_need_resched(); | ||
530 | return; | 530 | return; |
531 | } | ||
531 | 532 | ||
532 | /* NEED_RESCHED must be visible before we test polling */ | 533 | /* NEED_RESCHED must be visible before we test polling */ |
533 | smp_mb(); | 534 | smp_mb(); |
@@ -546,6 +547,7 @@ void resched_cpu(int cpu) | |||
546 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 547 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
547 | } | 548 | } |
548 | 549 | ||
550 | #ifdef CONFIG_SMP | ||
549 | #ifdef CONFIG_NO_HZ_COMMON | 551 | #ifdef CONFIG_NO_HZ_COMMON |
550 | /* | 552 | /* |
551 | * In the semi idle case, use the nearest busy cpu for migrating timers | 553 | * In the semi idle case, use the nearest busy cpu for migrating timers |
@@ -693,12 +695,6 @@ void sched_avg_update(struct rq *rq) | |||
693 | } | 695 | } |
694 | } | 696 | } |
695 | 697 | ||
696 | #else /* !CONFIG_SMP */ | ||
697 | void resched_task(struct task_struct *p) | ||
698 | { | ||
699 | assert_raw_spin_locked(&task_rq(p)->lock); | ||
700 | set_tsk_need_resched(p); | ||
701 | } | ||
702 | #endif /* CONFIG_SMP */ | 698 | #endif /* CONFIG_SMP */ |
703 | 699 | ||
704 | #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ | 700 | #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ |
@@ -767,14 +763,14 @@ static void set_load_weight(struct task_struct *p) | |||
767 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) | 763 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
768 | { | 764 | { |
769 | update_rq_clock(rq); | 765 | update_rq_clock(rq); |
770 | sched_info_queued(p); | 766 | sched_info_queued(rq, p); |
771 | p->sched_class->enqueue_task(rq, p, flags); | 767 | p->sched_class->enqueue_task(rq, p, flags); |
772 | } | 768 | } |
773 | 769 | ||
774 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) | 770 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
775 | { | 771 | { |
776 | update_rq_clock(rq); | 772 | update_rq_clock(rq); |
777 | sched_info_dequeued(p); | 773 | sched_info_dequeued(rq, p); |
778 | p->sched_class->dequeue_task(rq, p, flags); | 774 | p->sched_class->dequeue_task(rq, p, flags); |
779 | } | 775 | } |
780 | 776 | ||
@@ -987,7 +983,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
987 | * ttwu() will sort out the placement. | 983 | * ttwu() will sort out the placement. |
988 | */ | 984 | */ |
989 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && | 985 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
990 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); | 986 | !(task_preempt_count(p) & PREEMPT_ACTIVE)); |
991 | 987 | ||
992 | #ifdef CONFIG_LOCKDEP | 988 | #ifdef CONFIG_LOCKDEP |
993 | /* | 989 | /* |
@@ -1017,6 +1013,107 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1017 | __set_task_cpu(p, new_cpu); | 1013 | __set_task_cpu(p, new_cpu); |
1018 | } | 1014 | } |
1019 | 1015 | ||
1016 | static void __migrate_swap_task(struct task_struct *p, int cpu) | ||
1017 | { | ||
1018 | if (p->on_rq) { | ||
1019 | struct rq *src_rq, *dst_rq; | ||
1020 | |||
1021 | src_rq = task_rq(p); | ||
1022 | dst_rq = cpu_rq(cpu); | ||
1023 | |||
1024 | deactivate_task(src_rq, p, 0); | ||
1025 | set_task_cpu(p, cpu); | ||
1026 | activate_task(dst_rq, p, 0); | ||
1027 | check_preempt_curr(dst_rq, p, 0); | ||
1028 | } else { | ||
1029 | /* | ||
1030 | * Task isn't running anymore; make it appear like we migrated | ||
1031 | * it before it went to sleep. This means on wakeup we make the | ||
1032 | * previous cpu our target instead of where it really is. | ||
1033 | */ | ||
1034 | p->wake_cpu = cpu; | ||
1035 | } | ||
1036 | } | ||
1037 | |||
1038 | struct migration_swap_arg { | ||
1039 | struct task_struct *src_task, *dst_task; | ||
1040 | int src_cpu, dst_cpu; | ||
1041 | }; | ||
1042 | |||
1043 | static int migrate_swap_stop(void *data) | ||
1044 | { | ||
1045 | struct migration_swap_arg *arg = data; | ||
1046 | struct rq *src_rq, *dst_rq; | ||
1047 | int ret = -EAGAIN; | ||
1048 | |||
1049 | src_rq = cpu_rq(arg->src_cpu); | ||
1050 | dst_rq = cpu_rq(arg->dst_cpu); | ||
1051 | |||
1052 | double_raw_lock(&arg->src_task->pi_lock, | ||
1053 | &arg->dst_task->pi_lock); | ||
1054 | double_rq_lock(src_rq, dst_rq); | ||
1055 | if (task_cpu(arg->dst_task) != arg->dst_cpu) | ||
1056 | goto unlock; | ||
1057 | |||
1058 | if (task_cpu(arg->src_task) != arg->src_cpu) | ||
1059 | goto unlock; | ||
1060 | |||
1061 | if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) | ||
1062 | goto unlock; | ||
1063 | |||
1064 | if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) | ||
1065 | goto unlock; | ||
1066 | |||
1067 | __migrate_swap_task(arg->src_task, arg->dst_cpu); | ||
1068 | __migrate_swap_task(arg->dst_task, arg->src_cpu); | ||
1069 | |||
1070 | ret = 0; | ||
1071 | |||
1072 | unlock: | ||
1073 | double_rq_unlock(src_rq, dst_rq); | ||
1074 | raw_spin_unlock(&arg->dst_task->pi_lock); | ||
1075 | raw_spin_unlock(&arg->src_task->pi_lock); | ||
1076 | |||
1077 | return ret; | ||
1078 | } | ||
1079 | |||
1080 | /* | ||
1081 | * Cross migrate two tasks | ||
1082 | */ | ||
1083 | int migrate_swap(struct task_struct *cur, struct task_struct *p) | ||
1084 | { | ||
1085 | struct migration_swap_arg arg; | ||
1086 | int ret = -EINVAL; | ||
1087 | |||
1088 | arg = (struct migration_swap_arg){ | ||
1089 | .src_task = cur, | ||
1090 | .src_cpu = task_cpu(cur), | ||
1091 | .dst_task = p, | ||
1092 | .dst_cpu = task_cpu(p), | ||
1093 | }; | ||
1094 | |||
1095 | if (arg.src_cpu == arg.dst_cpu) | ||
1096 | goto out; | ||
1097 | |||
1098 | /* | ||
1099 | * These three tests are all lockless; this is OK since all of them | ||
1100 | * will be re-checked with proper locks held further down the line. | ||
1101 | */ | ||
1102 | if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) | ||
1103 | goto out; | ||
1104 | |||
1105 | if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) | ||
1106 | goto out; | ||
1107 | |||
1108 | if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) | ||
1109 | goto out; | ||
1110 | |||
1111 | ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); | ||
1112 | |||
1113 | out: | ||
1114 | return ret; | ||
1115 | } | ||
1116 | |||
1020 | struct migration_arg { | 1117 | struct migration_arg { |
1021 | struct task_struct *task; | 1118 | struct task_struct *task; |
1022 | int dest_cpu; | 1119 | int dest_cpu; |
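migrate_swap() pairs with stop_two_cpus() (added in this same series under kernel/stop_machine.c, per the diffstat) so that both runqueues are quiesced while the two tasks trade places, and the lockless eligibility checks are deliberately repeated under the proper locks in migrate_swap_stop(). A hedged sketch of how a NUMA placement caller might choose between a one-way move and a swap; numa_place_task() is hypothetical, while migrate_task_to() is the helper added further down in this core.c diff:

static int numa_place_task(struct task_struct *p, int target_cpu,
			   struct task_struct *swap_with)
{
	if (!swap_with) {
		/* room on the target node: plain one-way migration */
		return migrate_task_to(p, target_cpu);
	}

	/* both runqueues busy: exchange the two tasks under stop_two_cpus() */
	return migrate_swap(p, swap_with);
}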
@@ -1236,9 +1333,9 @@ out: | |||
1236 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. | 1333 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. |
1237 | */ | 1334 | */ |
1238 | static inline | 1335 | static inline |
1239 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | 1336 | int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) |
1240 | { | 1337 | { |
1241 | int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); | 1338 | cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); |
1242 | 1339 | ||
1243 | /* | 1340 | /* |
1244 | * In order not to call set_task_cpu() on a blocking task we need | 1341 | * In order not to call set_task_cpu() on a blocking task we need |
@@ -1330,12 +1427,13 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | |||
1330 | 1427 | ||
1331 | if (rq->idle_stamp) { | 1428 | if (rq->idle_stamp) { |
1332 | u64 delta = rq_clock(rq) - rq->idle_stamp; | 1429 | u64 delta = rq_clock(rq) - rq->idle_stamp; |
1333 | u64 max = 2*sysctl_sched_migration_cost; | 1430 | u64 max = 2*rq->max_idle_balance_cost; |
1334 | 1431 | ||
1335 | if (delta > max) | 1432 | update_avg(&rq->avg_idle, delta); |
1433 | |||
1434 | if (rq->avg_idle > max) | ||
1336 | rq->avg_idle = max; | 1435 | rq->avg_idle = max; |
1337 | else | 1436 | |
1338 | update_avg(&rq->avg_idle, delta); | ||
1339 | rq->idle_stamp = 0; | 1437 | rq->idle_stamp = 0; |
1340 | } | 1438 | } |
1341 | #endif | 1439 | #endif |
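The wakeup path now feeds every idle period into the average first and clamps the result against 2*max_idle_balance_cost, instead of clamping the sample itself against 2*sysctl_sched_migration_cost. A userspace sketch of the new bookkeeping, assuming update_avg() is the usual "avg += (sample - avg) / 8" filter used elsewhere in core.c and picking max_idle_balance_cost = 500000 ns purely for illustration:

#include <stdio.h>

static void update_avg(long long *avg, long long sample)
{
	*avg += (sample - *avg) / 8;
}

int main(void)
{
	long long avg_idle = 800000;          /* current estimate, ns */
	long long max = 2 * 500000;           /* 2 * rq->max_idle_balance_cost */
	long long delta = 10000000;           /* one unusually long idle period */

	update_avg(&avg_idle, delta);         /* averaged first: 1950000 ns */
	if (avg_idle > max)
		avg_idle = max;               /* ...then clamped to 1000000 ns */

	printf("avg_idle = %lld ns\n", avg_idle);
	return 0;
}

A single outlier therefore nudges the estimate rather than pinning it at the cap, while the cap still keeps idle balancing from assuming arbitrarily long idle windows.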
@@ -1396,6 +1494,14 @@ static void sched_ttwu_pending(void) | |||
1396 | 1494 | ||
1397 | void scheduler_ipi(void) | 1495 | void scheduler_ipi(void) |
1398 | { | 1496 | { |
1497 | /* | ||
1498 | * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting | ||
1499 | * TIF_NEED_RESCHED remotely (for the first time) will also send | ||
1500 | * this IPI. | ||
1501 | */ | ||
1502 | if (tif_need_resched()) | ||
1503 | set_preempt_need_resched(); | ||
1504 | |||
1399 | if (llist_empty(&this_rq()->wake_list) | 1505 | if (llist_empty(&this_rq()->wake_list) |
1400 | && !tick_nohz_full_cpu(smp_processor_id()) | 1506 | && !tick_nohz_full_cpu(smp_processor_id()) |
1401 | && !got_nohz_idle_kick()) | 1507 | && !got_nohz_idle_kick()) |
@@ -1513,7 +1619,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
1513 | if (p->sched_class->task_waking) | 1619 | if (p->sched_class->task_waking) |
1514 | p->sched_class->task_waking(p); | 1620 | p->sched_class->task_waking(p); |
1515 | 1621 | ||
1516 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | 1622 | cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); |
1517 | if (task_cpu(p) != cpu) { | 1623 | if (task_cpu(p) != cpu) { |
1518 | wake_flags |= WF_MIGRATED; | 1624 | wake_flags |= WF_MIGRATED; |
1519 | set_task_cpu(p, cpu); | 1625 | set_task_cpu(p, cpu); |
@@ -1595,7 +1701,7 @@ int wake_up_state(struct task_struct *p, unsigned int state) | |||
1595 | * | 1701 | * |
1596 | * __sched_fork() is basic setup used by init_idle() too: | 1702 | * __sched_fork() is basic setup used by init_idle() too: |
1597 | */ | 1703 | */ |
1598 | static void __sched_fork(struct task_struct *p) | 1704 | static void __sched_fork(unsigned long clone_flags, struct task_struct *p) |
1599 | { | 1705 | { |
1600 | p->on_rq = 0; | 1706 | p->on_rq = 0; |
1601 | 1707 | ||
@@ -1619,16 +1725,24 @@ static void __sched_fork(struct task_struct *p) | |||
1619 | 1725 | ||
1620 | #ifdef CONFIG_NUMA_BALANCING | 1726 | #ifdef CONFIG_NUMA_BALANCING |
1621 | if (p->mm && atomic_read(&p->mm->mm_users) == 1) { | 1727 | if (p->mm && atomic_read(&p->mm->mm_users) == 1) { |
1622 | p->mm->numa_next_scan = jiffies; | 1728 | p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
1623 | p->mm->numa_next_reset = jiffies; | ||
1624 | p->mm->numa_scan_seq = 0; | 1729 | p->mm->numa_scan_seq = 0; |
1625 | } | 1730 | } |
1626 | 1731 | ||
1732 | if (clone_flags & CLONE_VM) | ||
1733 | p->numa_preferred_nid = current->numa_preferred_nid; | ||
1734 | else | ||
1735 | p->numa_preferred_nid = -1; | ||
1736 | |||
1627 | p->node_stamp = 0ULL; | 1737 | p->node_stamp = 0ULL; |
1628 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; | 1738 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
1629 | p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0; | ||
1630 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; | 1739 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
1631 | p->numa_work.next = &p->numa_work; | 1740 | p->numa_work.next = &p->numa_work; |
1741 | p->numa_faults = NULL; | ||
1742 | p->numa_faults_buffer = NULL; | ||
1743 | |||
1744 | INIT_LIST_HEAD(&p->numa_entry); | ||
1745 | p->numa_group = NULL; | ||
1632 | #endif /* CONFIG_NUMA_BALANCING */ | 1746 | #endif /* CONFIG_NUMA_BALANCING */ |
1633 | } | 1747 | } |
1634 | 1748 | ||
@@ -1654,12 +1768,12 @@ void set_numabalancing_state(bool enabled) | |||
1654 | /* | 1768 | /* |
1655 | * fork()/clone()-time setup: | 1769 | * fork()/clone()-time setup: |
1656 | */ | 1770 | */ |
1657 | void sched_fork(struct task_struct *p) | 1771 | void sched_fork(unsigned long clone_flags, struct task_struct *p) |
1658 | { | 1772 | { |
1659 | unsigned long flags; | 1773 | unsigned long flags; |
1660 | int cpu = get_cpu(); | 1774 | int cpu = get_cpu(); |
1661 | 1775 | ||
1662 | __sched_fork(p); | 1776 | __sched_fork(clone_flags, p); |
1663 | /* | 1777 | /* |
1664 | * We mark the process as running here. This guarantees that | 1778 | * We mark the process as running here. This guarantees that |
1665 | * nobody will actually run it, and a signal or other external | 1779 | * nobody will actually run it, and a signal or other external |
@@ -1717,10 +1831,7 @@ void sched_fork(struct task_struct *p) | |||
1717 | #if defined(CONFIG_SMP) | 1831 | #if defined(CONFIG_SMP) |
1718 | p->on_cpu = 0; | 1832 | p->on_cpu = 0; |
1719 | #endif | 1833 | #endif |
1720 | #ifdef CONFIG_PREEMPT_COUNT | 1834 | init_task_preempt_count(p); |
1721 | /* Want to start with kernel preemption disabled. */ | ||
1722 | task_thread_info(p)->preempt_count = 1; | ||
1723 | #endif | ||
1724 | #ifdef CONFIG_SMP | 1835 | #ifdef CONFIG_SMP |
1725 | plist_node_init(&p->pushable_tasks, MAX_PRIO); | 1836 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
1726 | #endif | 1837 | #endif |
@@ -1747,7 +1858,7 @@ void wake_up_new_task(struct task_struct *p) | |||
1747 | * - cpus_allowed can change in the fork path | 1858 | * - cpus_allowed can change in the fork path |
1748 | * - any previously selected cpu might disappear through hotplug | 1859 | * - any previously selected cpu might disappear through hotplug |
1749 | */ | 1860 | */ |
1750 | set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0)); | 1861 | set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); |
1751 | #endif | 1862 | #endif |
1752 | 1863 | ||
1753 | /* Initialize new task's runnable average */ | 1864 | /* Initialize new task's runnable average */ |
@@ -1838,7 +1949,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, | |||
1838 | struct task_struct *next) | 1949 | struct task_struct *next) |
1839 | { | 1950 | { |
1840 | trace_sched_switch(prev, next); | 1951 | trace_sched_switch(prev, next); |
1841 | sched_info_switch(prev, next); | 1952 | sched_info_switch(rq, prev, next); |
1842 | perf_event_task_sched_out(prev, next); | 1953 | perf_event_task_sched_out(prev, next); |
1843 | fire_sched_out_preempt_notifiers(prev, next); | 1954 | fire_sched_out_preempt_notifiers(prev, next); |
1844 | prepare_lock_switch(rq, next); | 1955 | prepare_lock_switch(rq, next); |
@@ -1890,6 +2001,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
1890 | if (mm) | 2001 | if (mm) |
1891 | mmdrop(mm); | 2002 | mmdrop(mm); |
1892 | if (unlikely(prev_state == TASK_DEAD)) { | 2003 | if (unlikely(prev_state == TASK_DEAD)) { |
2004 | task_numa_free(prev); | ||
2005 | |||
1893 | /* | 2006 | /* |
1894 | * Remove function-return probe instances associated with this | 2007 | * Remove function-return probe instances associated with this |
1895 | * task and put them back on the free list. | 2008 | * task and put them back on the free list. |
@@ -2073,7 +2186,7 @@ void sched_exec(void) | |||
2073 | int dest_cpu; | 2186 | int dest_cpu; |
2074 | 2187 | ||
2075 | raw_spin_lock_irqsave(&p->pi_lock, flags); | 2188 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
2076 | dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); | 2189 | dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); |
2077 | if (dest_cpu == smp_processor_id()) | 2190 | if (dest_cpu == smp_processor_id()) |
2078 | goto unlock; | 2191 | goto unlock; |
2079 | 2192 | ||
@@ -2215,7 +2328,7 @@ notrace unsigned long get_parent_ip(unsigned long addr) | |||
2215 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ | 2328 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
2216 | defined(CONFIG_PREEMPT_TRACER)) | 2329 | defined(CONFIG_PREEMPT_TRACER)) |
2217 | 2330 | ||
2218 | void __kprobes add_preempt_count(int val) | 2331 | void __kprobes preempt_count_add(int val) |
2219 | { | 2332 | { |
2220 | #ifdef CONFIG_DEBUG_PREEMPT | 2333 | #ifdef CONFIG_DEBUG_PREEMPT |
2221 | /* | 2334 | /* |
@@ -2224,7 +2337,7 @@ void __kprobes add_preempt_count(int val) | |||
2224 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) | 2337 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
2225 | return; | 2338 | return; |
2226 | #endif | 2339 | #endif |
2227 | preempt_count() += val; | 2340 | __preempt_count_add(val); |
2228 | #ifdef CONFIG_DEBUG_PREEMPT | 2341 | #ifdef CONFIG_DEBUG_PREEMPT |
2229 | /* | 2342 | /* |
2230 | * Spinlock count overflowing soon? | 2343 | * Spinlock count overflowing soon? |
@@ -2235,9 +2348,9 @@ void __kprobes add_preempt_count(int val) | |||
2235 | if (preempt_count() == val) | 2348 | if (preempt_count() == val) |
2236 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 2349 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
2237 | } | 2350 | } |
2238 | EXPORT_SYMBOL(add_preempt_count); | 2351 | EXPORT_SYMBOL(preempt_count_add); |
2239 | 2352 | ||
2240 | void __kprobes sub_preempt_count(int val) | 2353 | void __kprobes preempt_count_sub(int val) |
2241 | { | 2354 | { |
2242 | #ifdef CONFIG_DEBUG_PREEMPT | 2355 | #ifdef CONFIG_DEBUG_PREEMPT |
2243 | /* | 2356 | /* |
@@ -2255,9 +2368,9 @@ void __kprobes sub_preempt_count(int val) | |||
2255 | 2368 | ||
2256 | if (preempt_count() == val) | 2369 | if (preempt_count() == val) |
2257 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 2370 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
2258 | preempt_count() -= val; | 2371 | __preempt_count_sub(val); |
2259 | } | 2372 | } |
2260 | EXPORT_SYMBOL(sub_preempt_count); | 2373 | EXPORT_SYMBOL(preempt_count_sub); |
2261 | 2374 | ||
2262 | #endif | 2375 | #endif |
2263 | 2376 | ||
@@ -2430,6 +2543,7 @@ need_resched: | |||
2430 | put_prev_task(rq, prev); | 2543 | put_prev_task(rq, prev); |
2431 | next = pick_next_task(rq); | 2544 | next = pick_next_task(rq); |
2432 | clear_tsk_need_resched(prev); | 2545 | clear_tsk_need_resched(prev); |
2546 | clear_preempt_need_resched(); | ||
2433 | rq->skip_clock_update = 0; | 2547 | rq->skip_clock_update = 0; |
2434 | 2548 | ||
2435 | if (likely(prev != next)) { | 2549 | if (likely(prev != next)) { |
@@ -2520,9 +2634,9 @@ asmlinkage void __sched notrace preempt_schedule(void) | |||
2520 | return; | 2634 | return; |
2521 | 2635 | ||
2522 | do { | 2636 | do { |
2523 | add_preempt_count_notrace(PREEMPT_ACTIVE); | 2637 | __preempt_count_add(PREEMPT_ACTIVE); |
2524 | __schedule(); | 2638 | __schedule(); |
2525 | sub_preempt_count_notrace(PREEMPT_ACTIVE); | 2639 | __preempt_count_sub(PREEMPT_ACTIVE); |
2526 | 2640 | ||
2527 | /* | 2641 | /* |
2528 | * Check again in case we missed a preemption opportunity | 2642 | * Check again in case we missed a preemption opportunity |
@@ -2541,20 +2655,19 @@ EXPORT_SYMBOL(preempt_schedule); | |||
2541 | */ | 2655 | */ |
2542 | asmlinkage void __sched preempt_schedule_irq(void) | 2656 | asmlinkage void __sched preempt_schedule_irq(void) |
2543 | { | 2657 | { |
2544 | struct thread_info *ti = current_thread_info(); | ||
2545 | enum ctx_state prev_state; | 2658 | enum ctx_state prev_state; |
2546 | 2659 | ||
2547 | /* Catch callers which need to be fixed */ | 2660 | /* Catch callers which need to be fixed */ |
2548 | BUG_ON(ti->preempt_count || !irqs_disabled()); | 2661 | BUG_ON(preempt_count() || !irqs_disabled()); |
2549 | 2662 | ||
2550 | prev_state = exception_enter(); | 2663 | prev_state = exception_enter(); |
2551 | 2664 | ||
2552 | do { | 2665 | do { |
2553 | add_preempt_count(PREEMPT_ACTIVE); | 2666 | __preempt_count_add(PREEMPT_ACTIVE); |
2554 | local_irq_enable(); | 2667 | local_irq_enable(); |
2555 | __schedule(); | 2668 | __schedule(); |
2556 | local_irq_disable(); | 2669 | local_irq_disable(); |
2557 | sub_preempt_count(PREEMPT_ACTIVE); | 2670 | __preempt_count_sub(PREEMPT_ACTIVE); |
2558 | 2671 | ||
2559 | /* | 2672 | /* |
2560 | * Check again in case we missed a preemption opportunity | 2673 | * Check again in case we missed a preemption opportunity |
@@ -3598,13 +3711,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |||
3598 | struct task_struct *p; | 3711 | struct task_struct *p; |
3599 | int retval; | 3712 | int retval; |
3600 | 3713 | ||
3601 | get_online_cpus(); | ||
3602 | rcu_read_lock(); | 3714 | rcu_read_lock(); |
3603 | 3715 | ||
3604 | p = find_process_by_pid(pid); | 3716 | p = find_process_by_pid(pid); |
3605 | if (!p) { | 3717 | if (!p) { |
3606 | rcu_read_unlock(); | 3718 | rcu_read_unlock(); |
3607 | put_online_cpus(); | ||
3608 | return -ESRCH; | 3719 | return -ESRCH; |
3609 | } | 3720 | } |
3610 | 3721 | ||
@@ -3661,7 +3772,6 @@ out_free_cpus_allowed: | |||
3661 | free_cpumask_var(cpus_allowed); | 3772 | free_cpumask_var(cpus_allowed); |
3662 | out_put_task: | 3773 | out_put_task: |
3663 | put_task_struct(p); | 3774 | put_task_struct(p); |
3664 | put_online_cpus(); | ||
3665 | return retval; | 3775 | return retval; |
3666 | } | 3776 | } |
3667 | 3777 | ||
@@ -3706,7 +3816,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
3706 | unsigned long flags; | 3816 | unsigned long flags; |
3707 | int retval; | 3817 | int retval; |
3708 | 3818 | ||
3709 | get_online_cpus(); | ||
3710 | rcu_read_lock(); | 3819 | rcu_read_lock(); |
3711 | 3820 | ||
3712 | retval = -ESRCH; | 3821 | retval = -ESRCH; |
@@ -3719,12 +3828,11 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) | |||
3719 | goto out_unlock; | 3828 | goto out_unlock; |
3720 | 3829 | ||
3721 | raw_spin_lock_irqsave(&p->pi_lock, flags); | 3830 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
3722 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); | 3831 | cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); |
3723 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | 3832 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
3724 | 3833 | ||
3725 | out_unlock: | 3834 | out_unlock: |
3726 | rcu_read_unlock(); | 3835 | rcu_read_unlock(); |
3727 | put_online_cpus(); | ||
3728 | 3836 | ||
3729 | return retval; | 3837 | return retval; |
3730 | } | 3838 | } |
@@ -3794,16 +3902,11 @@ SYSCALL_DEFINE0(sched_yield) | |||
3794 | return 0; | 3902 | return 0; |
3795 | } | 3903 | } |
3796 | 3904 | ||
3797 | static inline int should_resched(void) | ||
3798 | { | ||
3799 | return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); | ||
3800 | } | ||
3801 | |||
3802 | static void __cond_resched(void) | 3905 | static void __cond_resched(void) |
3803 | { | 3906 | { |
3804 | add_preempt_count(PREEMPT_ACTIVE); | 3907 | __preempt_count_add(PREEMPT_ACTIVE); |
3805 | __schedule(); | 3908 | __schedule(); |
3806 | sub_preempt_count(PREEMPT_ACTIVE); | 3909 | __preempt_count_sub(PREEMPT_ACTIVE); |
3807 | } | 3910 | } |
3808 | 3911 | ||
3809 | int __sched _cond_resched(void) | 3912 | int __sched _cond_resched(void) |
@@ -4186,7 +4289,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4186 | 4289 | ||
4187 | raw_spin_lock_irqsave(&rq->lock, flags); | 4290 | raw_spin_lock_irqsave(&rq->lock, flags); |
4188 | 4291 | ||
4189 | __sched_fork(idle); | 4292 | __sched_fork(0, idle); |
4190 | idle->state = TASK_RUNNING; | 4293 | idle->state = TASK_RUNNING; |
4191 | idle->se.exec_start = sched_clock(); | 4294 | idle->se.exec_start = sched_clock(); |
4192 | 4295 | ||
@@ -4212,7 +4315,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4212 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 4315 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
4213 | 4316 | ||
4214 | /* Set the preempt count _outside_ the spinlocks! */ | 4317 | /* Set the preempt count _outside_ the spinlocks! */ |
4215 | task_thread_info(idle)->preempt_count = 0; | 4318 | init_idle_preempt_count(idle, cpu); |
4216 | 4319 | ||
4217 | /* | 4320 | /* |
4218 | * The idle tasks have their own, simple scheduling class: | 4321 | * The idle tasks have their own, simple scheduling class: |
@@ -4346,6 +4449,53 @@ fail: | |||
4346 | return ret; | 4449 | return ret; |
4347 | } | 4450 | } |
4348 | 4451 | ||
4452 | #ifdef CONFIG_NUMA_BALANCING | ||
4453 | /* Migrate current task p to target_cpu */ | ||
4454 | int migrate_task_to(struct task_struct *p, int target_cpu) | ||
4455 | { | ||
4456 | struct migration_arg arg = { p, target_cpu }; | ||
4457 | int curr_cpu = task_cpu(p); | ||
4458 | |||
4459 | if (curr_cpu == target_cpu) | ||
4460 | return 0; | ||
4461 | |||
4462 | if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) | ||
4463 | return -EINVAL; | ||
4464 | |||
4465 | /* TODO: This is not properly updating schedstats */ | ||
4466 | |||
4467 | return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); | ||
4468 | } | ||
4469 | |||
4470 | /* | ||
4471 | * Requeue a task on a given node and accurately track the number of NUMA | ||
4472 | * tasks on the runqueues | ||
4473 | */ | ||
4474 | void sched_setnuma(struct task_struct *p, int nid) | ||
4475 | { | ||
4476 | struct rq *rq; | ||
4477 | unsigned long flags; | ||
4478 | bool on_rq, running; | ||
4479 | |||
4480 | rq = task_rq_lock(p, &flags); | ||
4481 | on_rq = p->on_rq; | ||
4482 | running = task_current(rq, p); | ||
4483 | |||
4484 | if (on_rq) | ||
4485 | dequeue_task(rq, p, 0); | ||
4486 | if (running) | ||
4487 | p->sched_class->put_prev_task(rq, p); | ||
4488 | |||
4489 | p->numa_preferred_nid = nid; | ||
4490 | |||
4491 | if (running) | ||
4492 | p->sched_class->set_curr_task(rq); | ||
4493 | if (on_rq) | ||
4494 | enqueue_task(rq, p, 0); | ||
4495 | task_rq_unlock(rq, p, &flags); | ||
4496 | } | ||
4497 | #endif | ||
4498 | |||
4349 | /* | 4499 | /* |
4350 | * migration_cpu_stop - this will be executed by a highprio stopper thread | 4500 | * migration_cpu_stop - this will be executed by a highprio stopper thread |
4351 | * and performs thread migration by bumping thread off CPU then | 4501 | * and performs thread migration by bumping thread off CPU then |
@@ -5119,6 +5269,7 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu) | |||
5119 | DEFINE_PER_CPU(struct sched_domain *, sd_llc); | 5269 | DEFINE_PER_CPU(struct sched_domain *, sd_llc); |
5120 | DEFINE_PER_CPU(int, sd_llc_size); | 5270 | DEFINE_PER_CPU(int, sd_llc_size); |
5121 | DEFINE_PER_CPU(int, sd_llc_id); | 5271 | DEFINE_PER_CPU(int, sd_llc_id); |
5272 | DEFINE_PER_CPU(struct sched_domain *, sd_numa); | ||
5122 | 5273 | ||
5123 | static void update_top_cache_domain(int cpu) | 5274 | static void update_top_cache_domain(int cpu) |
5124 | { | 5275 | { |
@@ -5135,6 +5286,9 @@ static void update_top_cache_domain(int cpu) | |||
5135 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 5286 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
5136 | per_cpu(sd_llc_size, cpu) = size; | 5287 | per_cpu(sd_llc_size, cpu) = size; |
5137 | per_cpu(sd_llc_id, cpu) = id; | 5288 | per_cpu(sd_llc_id, cpu) = id; |
5289 | |||
5290 | sd = lowest_flag_domain(cpu, SD_NUMA); | ||
5291 | rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); | ||
5138 | } | 5292 | } |
5139 | 5293 | ||
5140 | /* | 5294 | /* |
@@ -5654,6 +5808,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu) | |||
5654 | | 0*SD_SHARE_PKG_RESOURCES | 5808 | | 0*SD_SHARE_PKG_RESOURCES |
5655 | | 1*SD_SERIALIZE | 5809 | | 1*SD_SERIALIZE |
5656 | | 0*SD_PREFER_SIBLING | 5810 | | 0*SD_PREFER_SIBLING |
5811 | | 1*SD_NUMA | ||
5657 | | sd_local_flags(level) | 5812 | | sd_local_flags(level) |
5658 | , | 5813 | , |
5659 | .last_balance = jiffies, | 5814 | .last_balance = jiffies, |
@@ -6335,14 +6490,17 @@ void __init sched_init_smp(void) | |||
6335 | 6490 | ||
6336 | sched_init_numa(); | 6491 | sched_init_numa(); |
6337 | 6492 | ||
6338 | get_online_cpus(); | 6493 | /* |
6494 | * There's no userspace yet to cause hotplug operations; hence all the | ||
6495 | * cpu masks are stable and all blatant races in the below code cannot | ||
6496 | * happen. | ||
6497 | */ | ||
6339 | mutex_lock(&sched_domains_mutex); | 6498 | mutex_lock(&sched_domains_mutex); |
6340 | init_sched_domains(cpu_active_mask); | 6499 | init_sched_domains(cpu_active_mask); |
6341 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | 6500 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
6342 | if (cpumask_empty(non_isolated_cpus)) | 6501 | if (cpumask_empty(non_isolated_cpus)) |
6343 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | 6502 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
6344 | mutex_unlock(&sched_domains_mutex); | 6503 | mutex_unlock(&sched_domains_mutex); |
6345 | put_online_cpus(); | ||
6346 | 6504 | ||
6347 | hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); | 6505 | hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); |
6348 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); | 6506 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
@@ -6505,6 +6663,7 @@ void __init sched_init(void) | |||
6505 | rq->online = 0; | 6663 | rq->online = 0; |
6506 | rq->idle_stamp = 0; | 6664 | rq->idle_stamp = 0; |
6507 | rq->avg_idle = 2*sysctl_sched_migration_cost; | 6665 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
6666 | rq->max_idle_balance_cost = sysctl_sched_migration_cost; | ||
6508 | 6667 | ||
6509 | INIT_LIST_HEAD(&rq->cfs_tasks); | 6668 | INIT_LIST_HEAD(&rq->cfs_tasks); |
6510 | 6669 | ||
@@ -7277,7 +7436,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) | |||
7277 | 7436 | ||
7278 | runtime_enabled = quota != RUNTIME_INF; | 7437 | runtime_enabled = quota != RUNTIME_INF; |
7279 | runtime_was_enabled = cfs_b->quota != RUNTIME_INF; | 7438 | runtime_was_enabled = cfs_b->quota != RUNTIME_INF; |
7280 | account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled); | 7439 | /* |
7440 | * If we need to toggle cfs_bandwidth_used, off->on must occur | ||
7441 | * before making related changes, and on->off must occur afterwards | ||
7442 | */ | ||
7443 | if (runtime_enabled && !runtime_was_enabled) | ||
7444 | cfs_bandwidth_usage_inc(); | ||
7281 | raw_spin_lock_irq(&cfs_b->lock); | 7445 | raw_spin_lock_irq(&cfs_b->lock); |
7282 | cfs_b->period = ns_to_ktime(period); | 7446 | cfs_b->period = ns_to_ktime(period); |
7283 | cfs_b->quota = quota; | 7447 | cfs_b->quota = quota; |
@@ -7303,6 +7467,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) | |||
7303 | unthrottle_cfs_rq(cfs_rq); | 7467 | unthrottle_cfs_rq(cfs_rq); |
7304 | raw_spin_unlock_irq(&rq->lock); | 7468 | raw_spin_unlock_irq(&rq->lock); |
7305 | } | 7469 | } |
7470 | if (runtime_was_enabled && !runtime_enabled) | ||
7471 | cfs_bandwidth_usage_dec(); | ||
7306 | out_unlock: | 7472 | out_unlock: |
7307 | mutex_unlock(&cfs_constraints_mutex); | 7473 | mutex_unlock(&cfs_constraints_mutex); |
7308 | 7474 | ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 196559994f7c..5c34d1817e8f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/kallsyms.h> | 16 | #include <linux/kallsyms.h> |
17 | #include <linux/utsname.h> | 17 | #include <linux/utsname.h> |
18 | #include <linux/mempolicy.h> | ||
18 | 19 | ||
19 | #include "sched.h" | 20 | #include "sched.h" |
20 | 21 | ||
@@ -137,6 +138,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
137 | SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", | 138 | SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", |
138 | 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); | 139 | 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); |
139 | #endif | 140 | #endif |
141 | #ifdef CONFIG_NUMA_BALANCING | ||
142 | SEQ_printf(m, " %d", cpu_to_node(task_cpu(p))); | ||
143 | #endif | ||
140 | #ifdef CONFIG_CGROUP_SCHED | 144 | #ifdef CONFIG_CGROUP_SCHED |
141 | SEQ_printf(m, " %s", task_group_path(task_group(p))); | 145 | SEQ_printf(m, " %s", task_group_path(task_group(p))); |
142 | #endif | 146 | #endif |
@@ -159,7 +163,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) | |||
159 | read_lock_irqsave(&tasklist_lock, flags); | 163 | read_lock_irqsave(&tasklist_lock, flags); |
160 | 164 | ||
161 | do_each_thread(g, p) { | 165 | do_each_thread(g, p) { |
162 | if (!p->on_rq || task_cpu(p) != rq_cpu) | 166 | if (task_cpu(p) != rq_cpu) |
163 | continue; | 167 | continue; |
164 | 168 | ||
165 | print_task(m, rq, p); | 169 | print_task(m, rq, p); |
@@ -225,6 +229,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
225 | atomic_read(&cfs_rq->tg->runnable_avg)); | 229 | atomic_read(&cfs_rq->tg->runnable_avg)); |
226 | #endif | 230 | #endif |
227 | #endif | 231 | #endif |
232 | #ifdef CONFIG_CFS_BANDWIDTH | ||
233 | SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active", | ||
234 | cfs_rq->tg->cfs_bandwidth.timer_active); | ||
235 | SEQ_printf(m, " .%-30s: %d\n", "throttled", | ||
236 | cfs_rq->throttled); | ||
237 | SEQ_printf(m, " .%-30s: %d\n", "throttle_count", | ||
238 | cfs_rq->throttle_count); | ||
239 | #endif | ||
228 | 240 | ||
229 | #ifdef CONFIG_FAIR_GROUP_SCHED | 241 | #ifdef CONFIG_FAIR_GROUP_SCHED |
230 | print_cfs_group_stats(m, cpu, cfs_rq->tg); | 242 | print_cfs_group_stats(m, cpu, cfs_rq->tg); |
@@ -345,7 +357,7 @@ static void sched_debug_header(struct seq_file *m) | |||
345 | cpu_clk = local_clock(); | 357 | cpu_clk = local_clock(); |
346 | local_irq_restore(flags); | 358 | local_irq_restore(flags); |
347 | 359 | ||
348 | SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", | 360 | SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n", |
349 | init_utsname()->release, | 361 | init_utsname()->release, |
350 | (int)strcspn(init_utsname()->version, " "), | 362 | (int)strcspn(init_utsname()->version, " "), |
351 | init_utsname()->version); | 363 | init_utsname()->version); |
@@ -488,6 +500,56 @@ static int __init init_sched_debug_procfs(void) | |||
488 | 500 | ||
489 | __initcall(init_sched_debug_procfs); | 501 | __initcall(init_sched_debug_procfs); |
490 | 502 | ||
503 | #define __P(F) \ | ||
504 | SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) | ||
505 | #define P(F) \ | ||
506 | SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) | ||
507 | #define __PN(F) \ | ||
508 | SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) | ||
509 | #define PN(F) \ | ||
510 | SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) | ||
511 | |||
512 | |||
513 | static void sched_show_numa(struct task_struct *p, struct seq_file *m) | ||
514 | { | ||
515 | #ifdef CONFIG_NUMA_BALANCING | ||
516 | struct mempolicy *pol; | ||
517 | int node, i; | ||
518 | |||
519 | if (p->mm) | ||
520 | P(mm->numa_scan_seq); | ||
521 | |||
522 | task_lock(p); | ||
523 | pol = p->mempolicy; | ||
524 | if (pol && !(pol->flags & MPOL_F_MORON)) | ||
525 | pol = NULL; | ||
526 | mpol_get(pol); | ||
527 | task_unlock(p); | ||
528 | |||
529 | SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0)); | ||
530 | |||
531 | for_each_online_node(node) { | ||
532 | for (i = 0; i < 2; i++) { | ||
533 | unsigned long nr_faults = -1; | ||
534 | int cpu_current, home_node; | ||
535 | |||
536 | if (p->numa_faults) | ||
537 | nr_faults = p->numa_faults[2*node + i]; | ||
538 | |||
539 | cpu_current = !i ? (task_node(p) == node) : | ||
540 | (pol && node_isset(node, pol->v.nodes)); | ||
541 | |||
542 | home_node = (p->numa_preferred_nid == node); | ||
543 | |||
544 | SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n", | ||
545 | i, node, cpu_current, home_node, nr_faults); | ||
546 | } | ||
547 | } | ||
548 | |||
549 | mpol_put(pol); | ||
550 | #endif | ||
551 | } | ||
552 | |||
491 | void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | 553 | void proc_sched_show_task(struct task_struct *p, struct seq_file *m) |
492 | { | 554 | { |
493 | unsigned long nr_switches; | 555 | unsigned long nr_switches; |
@@ -591,6 +653,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
591 | SEQ_printf(m, "%-45s:%21Ld\n", | 653 | SEQ_printf(m, "%-45s:%21Ld\n", |
592 | "clock-delta", (long long)(t1-t0)); | 654 | "clock-delta", (long long)(t1-t0)); |
593 | } | 655 | } |
656 | |||
657 | sched_show_numa(p, m); | ||
594 | } | 658 | } |
595 | 659 | ||
596 | void proc_sched_set_task(struct task_struct *p) | 660 | void proc_sched_set_task(struct task_struct *p) |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c70201fbc61..41c02b6b090e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -681,6 +681,8 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
681 | } | 681 | } |
682 | 682 | ||
683 | #ifdef CONFIG_SMP | 683 | #ifdef CONFIG_SMP |
684 | static unsigned long task_h_load(struct task_struct *p); | ||
685 | |||
684 | static inline void __update_task_entity_contrib(struct sched_entity *se); | 686 | static inline void __update_task_entity_contrib(struct sched_entity *se); |
685 | 687 | ||
686 | /* Give new task start runnable values to heavy its load in infant time */ | 688 | /* Give new task start runnable values to heavy its load in infant time */ |
@@ -818,11 +820,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
818 | 820 | ||
819 | #ifdef CONFIG_NUMA_BALANCING | 821 | #ifdef CONFIG_NUMA_BALANCING |
820 | /* | 822 | /* |
821 | * numa task sample period in ms | 823 | * Approximate time to scan a full NUMA task in ms. The task scan period is |
824 | * calculated based on the task's virtual memory size and | ||
825 | * numa_balancing_scan_size. | ||
822 | */ | 826 | */ |
823 | unsigned int sysctl_numa_balancing_scan_period_min = 100; | 827 | unsigned int sysctl_numa_balancing_scan_period_min = 1000; |
824 | unsigned int sysctl_numa_balancing_scan_period_max = 100*50; | 828 | unsigned int sysctl_numa_balancing_scan_period_max = 60000; |
825 | unsigned int sysctl_numa_balancing_scan_period_reset = 100*600; | ||
826 | 829 | ||
827 | /* Portion of address space to scan in MB */ | 830 | /* Portion of address space to scan in MB */ |
828 | unsigned int sysctl_numa_balancing_scan_size = 256; | 831 | unsigned int sysctl_numa_balancing_scan_size = 256; |
@@ -830,41 +833,810 @@ unsigned int sysctl_numa_balancing_scan_size = 256; | |||
830 | /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ | 833 | /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ |
831 | unsigned int sysctl_numa_balancing_scan_delay = 1000; | 834 | unsigned int sysctl_numa_balancing_scan_delay = 1000; |
832 | 835 | ||
833 | static void task_numa_placement(struct task_struct *p) | 836 | /* |
837 | * After skipping a page migration on a shared page, skip N more numa page | ||
838 | * migrations unconditionally. This reduces the number of NUMA migrations | ||
839 | * in shared memory workloads, and has the effect of pulling tasks towards | ||
840 | * where their memory lives, over pulling the memory towards the task. | ||
841 | */ | ||
842 | unsigned int sysctl_numa_balancing_migrate_deferred = 16; | ||
843 | |||
844 | static unsigned int task_nr_scan_windows(struct task_struct *p) | ||
845 | { | ||
846 | unsigned long rss = 0; | ||
847 | unsigned long nr_scan_pages; | ||
848 | |||
849 | /* | ||
850 | * Calculations based on RSS as non-present and empty pages are skipped | ||
851 | * by the PTE scanner and NUMA hinting faults should be trapped based | ||
852 | * on resident pages | ||
853 | */ | ||
854 | nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); | ||
855 | rss = get_mm_rss(p->mm); | ||
856 | if (!rss) | ||
857 | rss = nr_scan_pages; | ||
858 | |||
859 | rss = round_up(rss, nr_scan_pages); | ||
860 | return rss / nr_scan_pages; | ||
861 | } | ||
862 | |||
863 | /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */ | ||
864 | #define MAX_SCAN_WINDOW 2560 | ||
865 | |||
866 | static unsigned int task_scan_min(struct task_struct *p) | ||
867 | { | ||
868 | unsigned int scan, floor; | ||
869 | unsigned int windows = 1; | ||
870 | |||
871 | if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW) | ||
872 | windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size; | ||
873 | floor = 1000 / windows; | ||
874 | |||
875 | scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); | ||
876 | return max_t(unsigned int, floor, scan); | ||
877 | } | ||
878 | |||
879 | static unsigned int task_scan_max(struct task_struct *p) | ||
880 | { | ||
881 | unsigned int smin = task_scan_min(p); | ||
882 | unsigned int smax; | ||
883 | |||
884 | /* Watch for min being lower than max due to floor calculations */ | ||
885 | smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); | ||
886 | return max(smin, smax); | ||
887 | } | ||
888 | |||
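The three helpers above turn the sysctls into a per-task scan period: the resident address space is divided into scan_size windows, the min/max periods are divided by that window count, and a floor keeps the scanner under MAX_SCAN_WINDOW MB/sec. A worked example mirrored in userspace, assuming the defaults above, 4 KiB pages and a hypothetical 1 GiB RSS:

#include <stdio.h>

int main(void)
{
	unsigned int scan_size_mb  = 256;     /* sysctl_numa_balancing_scan_size */
	unsigned int period_min_ms = 1000;    /* sysctl_numa_balancing_scan_period_min */
	unsigned int period_max_ms = 60000;   /* sysctl_numa_balancing_scan_period_max */
	unsigned long rss_pages    = 262144;  /* 1 GiB of 4 KiB pages, assumed */

	unsigned long nr_scan_pages = scan_size_mb * 256UL;   /* 256 MiB in 4 KiB pages */
	unsigned long windows = (rss_pages + nr_scan_pages - 1) / nr_scan_pages; /* 4 */

	unsigned int floor = 1000 / (2560 / scan_size_mb);    /* MAX_SCAN_WINDOW floor: 100 ms */
	unsigned int smin  = period_min_ms / windows;         /* 1000 / 4 = 250 ms */
	unsigned int smax  = period_max_ms / windows;         /* 60000 / 4 = 15000 ms */

	printf("task_scan_min = %u ms, task_scan_max = %u ms\n",
	       smin > floor ? smin : floor, smax > smin ? smax : smin);
	return 0;
}

So a larger address space is scanned less often per window, while a tiny task is still rate-limited by the 100 ms floor.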
889 | /* | ||
890 | * Once a preferred node is selected the scheduler balancer will prefer moving | ||
891 | * a task to that node for sysctl_numa_balancing_settle_count number of PTE | ||
892 | * scans. This will give the process the chance to accumulate more faults on | ||
893 | * the preferred node but still allow the scheduler to move the task again if | ||
894 | * the nodes CPUs are overloaded. | ||
895 | */ | ||
896 | unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4; | ||
897 | |||
898 | static void account_numa_enqueue(struct rq *rq, struct task_struct *p) | ||
899 | { | ||
900 | rq->nr_numa_running += (p->numa_preferred_nid != -1); | ||
901 | rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); | ||
902 | } | ||
903 | |||
904 | static void account_numa_dequeue(struct rq *rq, struct task_struct *p) | ||
905 | { | ||
906 | rq->nr_numa_running -= (p->numa_preferred_nid != -1); | ||
907 | rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); | ||
908 | } | ||
909 | |||
910 | struct numa_group { | ||
911 | atomic_t refcount; | ||
912 | |||
913 | spinlock_t lock; /* nr_tasks, tasks */ | ||
914 | int nr_tasks; | ||
915 | pid_t gid; | ||
916 | struct list_head task_list; | ||
917 | |||
918 | struct rcu_head rcu; | ||
919 | unsigned long total_faults; | ||
920 | unsigned long faults[0]; | ||
921 | }; | ||
922 | |||
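The trailing faults[0] member makes numa_group a variable-length object: the fault counters live inline after the struct, two (private/shared) per node, matching the 2*nid indexing used by group_faults() below. A sketch of the allocation this sizing implies (assumption only; the real constructor lands later in the NUMA grouping patches):

static struct numa_group *alloc_numa_group_sketch(void)
{
	struct numa_group *grp;

	/* one private and one shared counter per node, inline after the struct */
	grp = kzalloc(sizeof(*grp) + 2 * nr_node_ids * sizeof(unsigned long),
		      GFP_KERNEL);
	if (!grp)
		return NULL;

	atomic_set(&grp->refcount, 1);
	spin_lock_init(&grp->lock);
	INIT_LIST_HEAD(&grp->task_list);

	return grp;
}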
923 | pid_t task_numa_group_id(struct task_struct *p) | ||
924 | { | ||
925 | return p->numa_group ? p->numa_group->gid : 0; | ||
926 | } | ||
927 | |||
928 | static inline int task_faults_idx(int nid, int priv) | ||
929 | { | ||
930 | return 2 * nid + priv; | ||
931 | } | ||
932 | |||
933 | static inline unsigned long task_faults(struct task_struct *p, int nid) | ||
934 | { | ||
935 | if (!p->numa_faults) | ||
936 | return 0; | ||
937 | |||
938 | return p->numa_faults[task_faults_idx(nid, 0)] + | ||
939 | p->numa_faults[task_faults_idx(nid, 1)]; | ||
940 | } | ||
941 | |||
942 | static inline unsigned long group_faults(struct task_struct *p, int nid) | ||
943 | { | ||
944 | if (!p->numa_group) | ||
945 | return 0; | ||
946 | |||
947 | return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1]; | ||
948 | } | ||
949 | |||
950 | /* | ||
951 | * These return the fraction of accesses done by a particular task, or | ||
952 | * task group, on a particular numa node. The group weight is given a | ||
953 | * larger multiplier, in order to group tasks together that are almost | ||
954 | * evenly spread out between numa nodes. | ||
955 | */ | ||
956 | static inline unsigned long task_weight(struct task_struct *p, int nid) | ||
957 | { | ||
958 | unsigned long total_faults; | ||
959 | |||
960 | if (!p->numa_faults) | ||
961 | return 0; | ||
962 | |||
963 | total_faults = p->total_numa_faults; | ||
964 | |||
965 | if (!total_faults) | ||
966 | return 0; | ||
967 | |||
968 | return 1000 * task_faults(p, nid) / total_faults; | ||
969 | } | ||
970 | |||
971 | static inline unsigned long group_weight(struct task_struct *p, int nid) | ||
834 | { | 972 | { |
835 | int seq; | 973 | if (!p->numa_group || !p->numa_group->total_faults) |
974 | return 0; | ||
975 | |||
976 | return 1000 * group_faults(p, nid) / p->numa_group->total_faults; | ||
977 | } | ||
978 | |||
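task_weight() and group_weight() reduce the raw fault counters to per-mille shares, so a task's personal preference and its group's preference can be compared on the same 0..1000 scale. A small worked example with made-up fault counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical counters for node 1 */
	unsigned long task_faults_nid1 = 600, task_total_faults = 800;
	unsigned long group_faults_nid1 = 3000, group_total_faults = 4000;

	printf("task_weight = %lu, group_weight = %lu\n",
	       1000 * task_faults_nid1 / task_total_faults,    /* 750 */
	       1000 * group_faults_nid1 / group_total_faults); /* 750 */
	return 0;
}

Both land at 750 here, meaning the task and its group agree that three quarters of their accesses hit node 1.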
979 | static unsigned long weighted_cpuload(const int cpu); | ||
980 | static unsigned long source_load(int cpu, int type); | ||
981 | static unsigned long target_load(int cpu, int type); | ||
982 | static unsigned long power_of(int cpu); | ||
983 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg); | ||
984 | |||
985 | /* Cached statistics for all CPUs within a node */ | ||
986 | struct numa_stats { | ||
987 | unsigned long nr_running; | ||
988 | unsigned long load; | ||
989 | |||
990 | /* Total compute capacity of CPUs on a node */ | ||
991 | unsigned long power; | ||
992 | |||
993 | /* Approximate capacity in terms of runnable tasks on a node */ | ||
994 | unsigned long capacity; | ||
995 | int has_capacity; | ||
996 | }; | ||
997 | |||
998 | /* | ||
999 | * XXX borrowed from update_sg_lb_stats | ||
1000 | */ | ||
1001 | static void update_numa_stats(struct numa_stats *ns, int nid) | ||
1002 | { | ||
1003 | int cpu; | ||
1004 | |||
1005 | memset(ns, 0, sizeof(*ns)); | ||
1006 | for_each_cpu(cpu, cpumask_of_node(nid)) { | ||
1007 | struct rq *rq = cpu_rq(cpu); | ||
1008 | |||
1009 | ns->nr_running += rq->nr_running; | ||
1010 | ns->load += weighted_cpuload(cpu); | ||
1011 | ns->power += power_of(cpu); | ||
1012 | } | ||
1013 | |||
1014 | ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power; | ||
1015 | ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE); | ||
1016 | ns->has_capacity = (ns->nr_running < ns->capacity); | ||
1017 | } | ||
1018 | |||
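update_numa_stats() normalises a node's summed load by its summed power and derives an integer task capacity from that power. A worked example for a hypothetical 4-CPU node at full per-CPU power (SCHED_POWER_SCALE is 1024 in this kernel), three runnable tasks and a summed weighted load of 3072:

#include <stdio.h>

int main(void)
{
	unsigned long power      = 4 * 1024;  /* sum of power_of(cpu) over the node */
	unsigned long nr_running = 3;
	unsigned long raw_load   = 3072;      /* sum of weighted_cpuload(cpu) */

	unsigned long load     = raw_load * 1024 / power;    /* 768 */
	unsigned long capacity = (power + 1024 / 2) / 1024;  /* DIV_ROUND_CLOSEST: 4 */

	printf("load = %lu, capacity = %lu, has_capacity = %d\n",
	       load, capacity, nr_running < capacity);
	return 0;
}

has_capacity is what task_numa_compare() consults when the destination CPU is idle: with three tasks on a four-task-capacity node, the node still advertises room.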
1019 | struct task_numa_env { | ||
1020 | struct task_struct *p; | ||
1021 | |||
1022 | int src_cpu, src_nid; | ||
1023 | int dst_cpu, dst_nid; | ||
1024 | |||
1025 | struct numa_stats src_stats, dst_stats; | ||
1026 | |||
1027 | int imbalance_pct, idx; | ||
1028 | |||
1029 | struct task_struct *best_task; | ||
1030 | long best_imp; | ||
1031 | int best_cpu; | ||
1032 | }; | ||
1033 | |||
1034 | static void task_numa_assign(struct task_numa_env *env, | ||
1035 | struct task_struct *p, long imp) | ||
1036 | { | ||
1037 | if (env->best_task) | ||
1038 | put_task_struct(env->best_task); | ||
1039 | if (p) | ||
1040 | get_task_struct(p); | ||
1041 | |||
1042 | env->best_task = p; | ||
1043 | env->best_imp = imp; | ||
1044 | env->best_cpu = env->dst_cpu; | ||
1045 | } | ||
1046 | |||
1047 | /* | ||
1048 | * This checks if the overall compute and NUMA accesses of the system would | ||
1049 | * be improved if the source task was migrated to the target dst_cpu, taking | ||
1050 | * into account that it might be best if the task running on dst_cpu is | ||
1051 | * exchanged with the source task. | ||
1052 | */ | ||
1053 | static void task_numa_compare(struct task_numa_env *env, | ||
1054 | long taskimp, long groupimp) | ||
1055 | { | ||
1056 | struct rq *src_rq = cpu_rq(env->src_cpu); | ||
1057 | struct rq *dst_rq = cpu_rq(env->dst_cpu); | ||
1058 | struct task_struct *cur; | ||
1059 | long dst_load, src_load; | ||
1060 | long load; | ||
1061 | long imp = (groupimp > 0) ? groupimp : taskimp; | ||
1062 | |||
1063 | rcu_read_lock(); | ||
1064 | cur = ACCESS_ONCE(dst_rq->curr); | ||
1065 | if (cur->pid == 0) /* idle */ | ||
1066 | cur = NULL; | ||
1067 | |||
1068 | /* | ||
1069 | * "imp" is the fault differential for the source task between the | ||
1070 | * source and destination node. Calculate the total differential for | ||
1071 | * the source task and potential destination task. The more negative | ||
1072 | * the value is, the more remote accesses would be expected to | ||
1073 | * be incurred if the tasks were swapped. | ||
1074 | */ | ||
1075 | if (cur) { | ||
1076 | /* Skip this swap candidate if cannot move to the source cpu */ | ||
1077 | if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) | ||
1078 | goto unlock; | ||
1079 | |||
1080 | /* | ||
1081 | * If dst and source tasks are in the same NUMA group, or not | ||
1082 | * in any group then look only at task weights. | ||
1083 | */ | ||
1084 | if (cur->numa_group == env->p->numa_group) { | ||
1085 | imp = taskimp + task_weight(cur, env->src_nid) - | ||
1086 | task_weight(cur, env->dst_nid); | ||
1087 | /* | ||
1088 | * Add some hysteresis to prevent swapping the | ||
1089 | * tasks within a group over tiny differences. | ||
1090 | */ | ||
1091 | if (cur->numa_group) | ||
1092 | imp -= imp/16; | ||
1093 | } else { | ||
1094 | /* | ||
1095 | * Compare the group weights. If a task is all by | ||
1096 | * itself (not part of a group), use the task weight | ||
1097 | * instead. | ||
1098 | */ | ||
1099 | if (env->p->numa_group) | ||
1100 | imp = groupimp; | ||
1101 | else | ||
1102 | imp = taskimp; | ||
1103 | |||
1104 | if (cur->numa_group) | ||
1105 | imp += group_weight(cur, env->src_nid) - | ||
1106 | group_weight(cur, env->dst_nid); | ||
1107 | else | ||
1108 | imp += task_weight(cur, env->src_nid) - | ||
1109 | task_weight(cur, env->dst_nid); | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | if (imp < env->best_imp) | ||
1114 | goto unlock; | ||
1115 | |||
1116 | if (!cur) { | ||
1117 | /* Is there capacity at our destination? */ | ||
1118 | if (env->src_stats.has_capacity && | ||
1119 | !env->dst_stats.has_capacity) | ||
1120 | goto unlock; | ||
1121 | |||
1122 | goto balance; | ||
1123 | } | ||
1124 | |||
1125 | /* Balance doesn't matter much if we're running a task per cpu */ | ||
1126 | if (src_rq->nr_running == 1 && dst_rq->nr_running == 1) | ||
1127 | goto assign; | ||
1128 | |||
1129 | /* | ||
1130 | * In the overloaded case, try and keep the load balanced. | ||
1131 | */ | ||
1132 | balance: | ||
1133 | dst_load = env->dst_stats.load; | ||
1134 | src_load = env->src_stats.load; | ||
1135 | |||
1136 | /* XXX missing power terms */ | ||
1137 | load = task_h_load(env->p); | ||
1138 | dst_load += load; | ||
1139 | src_load -= load; | ||
1140 | |||
1141 | if (cur) { | ||
1142 | load = task_h_load(cur); | ||
1143 | dst_load -= load; | ||
1144 | src_load += load; | ||
1145 | } | ||
1146 | |||
1147 | /* make src_load the smaller */ | ||
1148 | if (dst_load < src_load) | ||
1149 | swap(dst_load, src_load); | ||
1150 | |||
1151 | if (src_load * env->imbalance_pct < dst_load * 100) | ||
1152 | goto unlock; | ||
1153 | |||
1154 | assign: | ||
1155 | task_numa_assign(env, cur, imp); | ||
1156 | unlock: | ||
1157 | rcu_read_unlock(); | ||
1158 | } | ||
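The tail of task_numa_compare() only records a new best candidate when the post-move load split stays within the domain's imbalance allowance: after adjusting both sides for the moved task, the larger side may exceed the smaller by at most imbalance_pct percent. A stand-alone sketch of that acceptance test (ignoring the swap candidate, with made-up loads):

#include <stdbool.h>
#include <stdio.h>

/* Would moving a task of weight 'load' from src to dst keep things balanced enough? */
static bool move_keeps_balance(long src_load, long dst_load, long load, int imbalance_pct)
{
	dst_load += load;
	src_load -= load;

	if (dst_load < src_load) {	/* make src_load the smaller, as in the patch */
		long tmp = src_load;
		src_load = dst_load;
		dst_load = tmp;
	}

	/* reject when the larger side exceeds the smaller by more than imbalance_pct% */
	return src_load * imbalance_pct >= dst_load * 100;
}

int main(void)
{
	/* hypothetical: src node at 2000, dst at 1900, task weighs 32, 112% allowance */
	printf("%s\n", move_keeps_balance(2000, 1900, 32, 112) ? "accept" : "reject");
	return 0;
}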
1159 | |||
1160 | static void task_numa_find_cpu(struct task_numa_env *env, | ||
1161 | long taskimp, long groupimp) | ||
1162 | { | ||
1163 | int cpu; | ||
1164 | |||
1165 | for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { | ||
1166 | /* Skip this CPU if the source task cannot migrate */ | ||
1167 | if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p))) | ||
1168 | continue; | ||
1169 | |||
1170 | env->dst_cpu = cpu; | ||
1171 | task_numa_compare(env, taskimp, groupimp); | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | static int task_numa_migrate(struct task_struct *p) | ||
1176 | { | ||
1177 | struct task_numa_env env = { | ||
1178 | .p = p, | ||
1179 | |||
1180 | .src_cpu = task_cpu(p), | ||
1181 | .src_nid = task_node(p), | ||
1182 | |||
1183 | .imbalance_pct = 112, | ||
1184 | |||
1185 | .best_task = NULL, | ||
1186 | .best_imp = 0, | ||
1187 | .best_cpu = -1 | ||
1188 | }; | ||
1189 | struct sched_domain *sd; | ||
1190 | unsigned long taskweight, groupweight; | ||
1191 | int nid, ret; | ||
1192 | long taskimp, groupimp; | ||
1193 | |||
1194 | /* | ||
1195 | * Pick the lowest SD_NUMA domain, as that would have the smallest | ||
1196 | * imbalance and would be the first to start moving tasks about. | ||
1197 | * | ||
1198 | * And we want to avoid any moving of tasks about, as that would create | ||
1199 | * random movement of tasks -- countering the numa conditions we're trying | ||
1200 | * to satisfy here. | ||
1201 | */ | ||
1202 | rcu_read_lock(); | ||
1203 | sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); | ||
1204 | env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; | ||
1205 | rcu_read_unlock(); | ||
1206 | |||
1207 | taskweight = task_weight(p, env.src_nid); | ||
1208 | groupweight = group_weight(p, env.src_nid); | ||
1209 | update_numa_stats(&env.src_stats, env.src_nid); | ||
1210 | env.dst_nid = p->numa_preferred_nid; | ||
1211 | taskimp = task_weight(p, env.dst_nid) - taskweight; | ||
1212 | groupimp = group_weight(p, env.dst_nid) - groupweight; | ||
1213 | update_numa_stats(&env.dst_stats, env.dst_nid); | ||
1214 | |||
1215 | /* If the preferred nid has capacity, try to use it. */ | ||
1216 | if (env.dst_stats.has_capacity) | ||
1217 | task_numa_find_cpu(&env, taskimp, groupimp); | ||
1218 | |||
1219 | /* No space available on the preferred nid. Look elsewhere. */ | ||
1220 | if (env.best_cpu == -1) { | ||
1221 | for_each_online_node(nid) { | ||
1222 | if (nid == env.src_nid || nid == p->numa_preferred_nid) | ||
1223 | continue; | ||
1224 | |||
1225 | /* Only consider nodes where both the task and the group benefit */ | ||
1226 | taskimp = task_weight(p, nid) - taskweight; | ||
1227 | groupimp = group_weight(p, nid) - groupweight; | ||
1228 | if (taskimp < 0 && groupimp < 0) | ||
1229 | continue; | ||
1230 | |||
1231 | env.dst_nid = nid; | ||
1232 | update_numa_stats(&env.dst_stats, env.dst_nid); | ||
1233 | task_numa_find_cpu(&env, taskimp, groupimp); | ||
1234 | } | ||
1235 | } | ||
1236 | |||
1237 | /* No better CPU than the current one was found. */ | ||
1238 | if (env.best_cpu == -1) | ||
1239 | return -EAGAIN; | ||
1240 | |||
1241 | sched_setnuma(p, env.dst_nid); | ||
1242 | |||
1243 | /* | ||
1244 | * Reset the scan period if the task is being rescheduled on an | ||
1245 | * alternative node to recheck if the task is now properly placed. | ||
1246 | */ | ||
1247 | p->numa_scan_period = task_scan_min(p); | ||
1248 | |||
1249 | if (env.best_task == NULL) { | ||
1250 | int ret = migrate_task_to(p, env.best_cpu); | ||
1251 | return ret; | ||
1252 | } | ||
1253 | |||
1254 | ret = migrate_swap(p, env.best_task); | ||
1255 | put_task_struct(env.best_task); | ||
1256 | return ret; | ||
1257 | } | ||
1258 | |||
1259 | /* Attempt to migrate a task to a CPU on the preferred node. */ | ||
1260 | static void numa_migrate_preferred(struct task_struct *p) | ||
1261 | { | ||
1262 | /* This task has no NUMA fault statistics yet */ | ||
1263 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) | ||
1264 | return; | ||
1265 | |||
1266 | /* Periodically retry migrating the task to the preferred node */ | ||
1267 | p->numa_migrate_retry = jiffies + HZ; | ||
1268 | |||
1269 | /* Success if task is already running on preferred CPU */ | ||
1270 | if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) | ||
1271 | return; | ||
1272 | |||
1273 | /* Otherwise, try migrate to a CPU on the preferred node */ | ||
1274 | task_numa_migrate(p); | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1278 | * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS | ||
1279 | * increments. The more local the fault statistics are, the higher the scan | ||
1280 | * period will be for the next scan window. If the local/remote ratio is below | ||
1281 | * NUMA_PERIOD_THRESHOLD (where the ratio ranges over 1..NUMA_PERIOD_SLOTS) the | ||
1282 | * scan period will decrease. | ||
1283 | */ | ||
1284 | #define NUMA_PERIOD_SLOTS 10 | ||
1285 | #define NUMA_PERIOD_THRESHOLD 3 | ||
1286 | |||
1287 | /* | ||
1288 | * Increase the scan period (slow down scanning) if the majority of | ||
1289 | * our memory is already on our local node, or if the majority of | ||
1290 | * the page accesses are shared with other processes. | ||
1291 | * Otherwise, decrease the scan period. | ||
1292 | */ | ||
1293 | static void update_task_scan_period(struct task_struct *p, | ||
1294 | unsigned long shared, unsigned long private) | ||
1295 | { | ||
1296 | unsigned int period_slot; | ||
1297 | int ratio; | ||
1298 | int diff; | ||
1299 | |||
1300 | unsigned long remote = p->numa_faults_locality[0]; | ||
1301 | unsigned long local = p->numa_faults_locality[1]; | ||
1302 | |||
1303 | /* | ||
1304 | * If there were no recorded hinting faults then either the task is | ||
1305 | * completely idle or all activity is in areas that are not of interest | ||
1306 | * to automatic numa balancing. Scan slower. | ||
1307 | */ | ||
1308 | if (local + shared == 0) { | ||
1309 | p->numa_scan_period = min(p->numa_scan_period_max, | ||
1310 | p->numa_scan_period << 1); | ||
1311 | |||
1312 | p->mm->numa_next_scan = jiffies + | ||
1313 | msecs_to_jiffies(p->numa_scan_period); | ||
836 | 1314 | ||
837 | if (!p->mm) /* for example, ksmd faulting in a user's mm */ | ||
838 | return; | 1315 | return; |
1316 | } | ||
1317 | |||
1318 | /* | ||
1319 | * Prepare to scale scan period relative to the current period. | ||
1320 | * == NUMA_PERIOD_THRESHOLD scan period stays the same | ||
1321 | * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) | ||
1322 | * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) | ||
1323 | */ | ||
1324 | period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); | ||
1325 | ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); | ||
1326 | if (ratio >= NUMA_PERIOD_THRESHOLD) { | ||
1327 | int slot = ratio - NUMA_PERIOD_THRESHOLD; | ||
1328 | if (!slot) | ||
1329 | slot = 1; | ||
1330 | diff = slot * period_slot; | ||
1331 | } else { | ||
1332 | diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; | ||
1333 | |||
1334 | /* | ||
1335 | * Scale scan rate increases based on sharing. There is an | ||
1336 | * inverse relationship between the degree of sharing and | ||
1337 | * the adjustment made to the scanning period. Broadly | ||
1338 | * speaking, the intent is that there is little point | ||
1339 | * scanning faster if shared accesses dominate, as it may | ||
1340 | * simply bounce migrations uselessly. | ||
1341 | */ | ||
1342 | period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS); | ||
1343 | ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared)); | ||
1344 | diff = (diff * ratio) / NUMA_PERIOD_SLOTS; | ||
1345 | } | ||
1346 | |||
1347 | p->numa_scan_period = clamp(p->numa_scan_period + diff, | ||
1348 | task_scan_min(p), task_scan_max(p)); | ||
1349 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); | ||
1350 | } | ||
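Worked example of the adjustment above: with a 1000ms scan period, period_slot is 100ms; if 8 of the last 10 hinting faults were local (ratio 8, threshold 3) the period grows by (8 - 3) * 100 = 500ms, while 1 of 10 local gives a raw change of -(3 - 1) * 100 = -200ms that is then damped by the private/shared ratio before clamping to the task's min/max. A slightly simplified stand-alone sketch of the same math (the clamp and the counter reset are omitted):

#include <stdio.h>

#define NUMA_PERIOD_SLOTS	10
#define NUMA_PERIOD_THRESHOLD	3
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Scan-period delta, mirroring the arithmetic in update_task_scan_period(). */
static int scan_period_diff(int period, long local, long remote, long private, long shared)
{
	int period_slot = DIV_ROUND_UP(period, NUMA_PERIOD_SLOTS);
	int ratio = local * NUMA_PERIOD_SLOTS / (local + remote);
	int diff;

	if (ratio >= NUMA_PERIOD_THRESHOLD) {
		int slot = ratio - NUMA_PERIOD_THRESHOLD;
		diff = (slot ? slot : 1) * period_slot;		/* lengthen: scan slower */
	} else {
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
		/* damp the speed-up when most faults hit shared pages */
		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, private + shared);
		diff = diff * ratio / NUMA_PERIOD_SLOTS;
	}
	return diff;
}

int main(void)
{
	printf("mostly local:  %+d ms\n", scan_period_diff(1000, 8, 2, 5, 5));
	printf("mostly remote: %+d ms\n", scan_period_diff(1000, 1, 9, 5, 5));
	return 0;
}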
1351 | |||
1352 | static void task_numa_placement(struct task_struct *p) | ||
1353 | { | ||
1354 | int seq, nid, max_nid = -1, max_group_nid = -1; | ||
1355 | unsigned long max_faults = 0, max_group_faults = 0; | ||
1356 | unsigned long fault_types[2] = { 0, 0 }; | ||
1357 | spinlock_t *group_lock = NULL; | ||
1358 | |||
839 | seq = ACCESS_ONCE(p->mm->numa_scan_seq); | 1359 | seq = ACCESS_ONCE(p->mm->numa_scan_seq); |
840 | if (p->numa_scan_seq == seq) | 1360 | if (p->numa_scan_seq == seq) |
841 | return; | 1361 | return; |
842 | p->numa_scan_seq = seq; | 1362 | p->numa_scan_seq = seq; |
1363 | p->numa_scan_period_max = task_scan_max(p); | ||
1364 | |||
1365 | /* If the task is part of a group prevent parallel updates to group stats */ | ||
1366 | if (p->numa_group) { | ||
1367 | group_lock = &p->numa_group->lock; | ||
1368 | spin_lock(group_lock); | ||
1369 | } | ||
1370 | |||
1371 | /* Find the node with the highest number of faults */ | ||
1372 | for_each_online_node(nid) { | ||
1373 | unsigned long faults = 0, group_faults = 0; | ||
1374 | int priv, i; | ||
1375 | |||
1376 | for (priv = 0; priv < 2; priv++) { | ||
1377 | long diff; | ||
1378 | |||
1379 | i = task_faults_idx(nid, priv); | ||
1380 | diff = -p->numa_faults[i]; | ||
1381 | |||
1382 | /* Decay existing window, copy faults since last scan */ | ||
1383 | p->numa_faults[i] >>= 1; | ||
1384 | p->numa_faults[i] += p->numa_faults_buffer[i]; | ||
1385 | fault_types[priv] += p->numa_faults_buffer[i]; | ||
1386 | p->numa_faults_buffer[i] = 0; | ||
1387 | |||
1388 | faults += p->numa_faults[i]; | ||
1389 | diff += p->numa_faults[i]; | ||
1390 | p->total_numa_faults += diff; | ||
1391 | if (p->numa_group) { | ||
1392 | /* safe because we can only change our own group */ | ||
1393 | p->numa_group->faults[i] += diff; | ||
1394 | p->numa_group->total_faults += diff; | ||
1395 | group_faults += p->numa_group->faults[i]; | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | if (faults > max_faults) { | ||
1400 | max_faults = faults; | ||
1401 | max_nid = nid; | ||
1402 | } | ||
1403 | |||
1404 | if (group_faults > max_group_faults) { | ||
1405 | max_group_faults = group_faults; | ||
1406 | max_group_nid = nid; | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1410 | update_task_scan_period(p, fault_types[0], fault_types[1]); | ||
1411 | |||
1412 | if (p->numa_group) { | ||
1413 | /* | ||
1414 | * If the preferred task and group nids are different, | ||
1415 | * iterate over the nodes again to find the best place. | ||
1416 | */ | ||
1417 | if (max_nid != max_group_nid) { | ||
1418 | unsigned long weight, max_weight = 0; | ||
1419 | |||
1420 | for_each_online_node(nid) { | ||
1421 | weight = task_weight(p, nid) + group_weight(p, nid); | ||
1422 | if (weight > max_weight) { | ||
1423 | max_weight = weight; | ||
1424 | max_nid = nid; | ||
1425 | } | ||
1426 | } | ||
1427 | } | ||
1428 | |||
1429 | spin_unlock(group_lock); | ||
1430 | } | ||
843 | 1431 | ||
844 | /* FIXME: Scheduling placement policy hints go here */ | 1432 | /* Preferred node as the node with the most faults */ |
1433 | if (max_faults && max_nid != p->numa_preferred_nid) { | ||
1434 | /* Update the preferred nid and migrate task if possible */ | ||
1435 | sched_setnuma(p, max_nid); | ||
1436 | numa_migrate_preferred(p); | ||
1437 | } | ||
1438 | } | ||
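The per-node loop above maintains an exponentially decaying window: each scan epoch halves the accumulated count and folds in the faults recorded since the last scan, so recent behaviour dominates while older history still carries some weight. A toy illustration of that update for a single node:

#include <stdio.h>

int main(void)
{
	/* decaying fault window for one node, as updated in task_numa_placement() */
	unsigned long faults = 0;
	unsigned long buffer[] = { 100, 100, 0, 0, 0 };	/* faults seen in each scan epoch */

	for (int i = 0; i < 5; i++) {
		faults >>= 1;		/* decay the existing window */
		faults += buffer[i];	/* fold in faults since the last scan */
		printf("epoch %d: faults=%lu\n", i, faults);
	}
	return 0;
}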
1439 | |||
1440 | static inline int get_numa_group(struct numa_group *grp) | ||
1441 | { | ||
1442 | return atomic_inc_not_zero(&grp->refcount); | ||
1443 | } | ||
1444 | |||
1445 | static inline void put_numa_group(struct numa_group *grp) | ||
1446 | { | ||
1447 | if (atomic_dec_and_test(&grp->refcount)) | ||
1448 | kfree_rcu(grp, rcu); | ||
1449 | } | ||
1450 | |||
1451 | static void task_numa_group(struct task_struct *p, int cpupid, int flags, | ||
1452 | int *priv) | ||
1453 | { | ||
1454 | struct numa_group *grp, *my_grp; | ||
1455 | struct task_struct *tsk; | ||
1456 | bool join = false; | ||
1457 | int cpu = cpupid_to_cpu(cpupid); | ||
1458 | int i; | ||
1459 | |||
1460 | if (unlikely(!p->numa_group)) { | ||
1461 | unsigned int size = sizeof(struct numa_group) + | ||
1462 | 2*nr_node_ids*sizeof(unsigned long); | ||
1463 | |||
1464 | grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | ||
1465 | if (!grp) | ||
1466 | return; | ||
1467 | |||
1468 | atomic_set(&grp->refcount, 1); | ||
1469 | spin_lock_init(&grp->lock); | ||
1470 | INIT_LIST_HEAD(&grp->task_list); | ||
1471 | grp->gid = p->pid; | ||
1472 | |||
1473 | for (i = 0; i < 2*nr_node_ids; i++) | ||
1474 | grp->faults[i] = p->numa_faults[i]; | ||
1475 | |||
1476 | grp->total_faults = p->total_numa_faults; | ||
1477 | |||
1478 | list_add(&p->numa_entry, &grp->task_list); | ||
1479 | grp->nr_tasks++; | ||
1480 | rcu_assign_pointer(p->numa_group, grp); | ||
1481 | } | ||
1482 | |||
1483 | rcu_read_lock(); | ||
1484 | tsk = ACCESS_ONCE(cpu_rq(cpu)->curr); | ||
1485 | |||
1486 | if (!cpupid_match_pid(tsk, cpupid)) | ||
1487 | goto no_join; | ||
1488 | |||
1489 | grp = rcu_dereference(tsk->numa_group); | ||
1490 | if (!grp) | ||
1491 | goto no_join; | ||
1492 | |||
1493 | my_grp = p->numa_group; | ||
1494 | if (grp == my_grp) | ||
1495 | goto no_join; | ||
1496 | |||
1497 | /* | ||
1498 | * Only join the other group if it's bigger; if we're the bigger group, | ||
1499 | * the other task will join us. | ||
1500 | */ | ||
1501 | if (my_grp->nr_tasks > grp->nr_tasks) | ||
1502 | goto no_join; | ||
1503 | |||
1504 | /* | ||
1505 | * Tie-break on the grp address. | ||
1506 | */ | ||
1507 | if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) | ||
1508 | goto no_join; | ||
1509 | |||
1510 | /* Always join threads in the same process. */ | ||
1511 | if (tsk->mm == current->mm) | ||
1512 | join = true; | ||
1513 | |||
1514 | /* Simple filter to avoid false positives due to PID collisions */ | ||
1515 | if (flags & TNF_SHARED) | ||
1516 | join = true; | ||
1517 | |||
1518 | /* Update priv based on whether false sharing was detected */ | ||
1519 | *priv = !join; | ||
1520 | |||
1521 | if (join && !get_numa_group(grp)) | ||
1522 | goto no_join; | ||
1523 | |||
1524 | rcu_read_unlock(); | ||
1525 | |||
1526 | if (!join) | ||
1527 | return; | ||
1528 | |||
1529 | double_lock(&my_grp->lock, &grp->lock); | ||
1530 | |||
1531 | for (i = 0; i < 2*nr_node_ids; i++) { | ||
1532 | my_grp->faults[i] -= p->numa_faults[i]; | ||
1533 | grp->faults[i] += p->numa_faults[i]; | ||
1534 | } | ||
1535 | my_grp->total_faults -= p->total_numa_faults; | ||
1536 | grp->total_faults += p->total_numa_faults; | ||
1537 | |||
1538 | list_move(&p->numa_entry, &grp->task_list); | ||
1539 | my_grp->nr_tasks--; | ||
1540 | grp->nr_tasks++; | ||
1541 | |||
1542 | spin_unlock(&my_grp->lock); | ||
1543 | spin_unlock(&grp->lock); | ||
1544 | |||
1545 | rcu_assign_pointer(p->numa_group, grp); | ||
1546 | |||
1547 | put_numa_group(my_grp); | ||
1548 | return; | ||
1549 | |||
1550 | no_join: | ||
1551 | rcu_read_unlock(); | ||
1552 | return; | ||
1553 | } | ||
1554 | |||
1555 | void task_numa_free(struct task_struct *p) | ||
1556 | { | ||
1557 | struct numa_group *grp = p->numa_group; | ||
1558 | int i; | ||
1559 | void *numa_faults = p->numa_faults; | ||
1560 | |||
1561 | if (grp) { | ||
1562 | spin_lock(&grp->lock); | ||
1563 | for (i = 0; i < 2*nr_node_ids; i++) | ||
1564 | grp->faults[i] -= p->numa_faults[i]; | ||
1565 | grp->total_faults -= p->total_numa_faults; | ||
1566 | |||
1567 | list_del(&p->numa_entry); | ||
1568 | grp->nr_tasks--; | ||
1569 | spin_unlock(&grp->lock); | ||
1570 | rcu_assign_pointer(p->numa_group, NULL); | ||
1571 | put_numa_group(grp); | ||
1572 | } | ||
1573 | |||
1574 | p->numa_faults = NULL; | ||
1575 | p->numa_faults_buffer = NULL; | ||
1576 | kfree(numa_faults); | ||
845 | } | 1577 | } |
846 | 1578 | ||
847 | /* | 1579 | /* |
848 | * Got a PROT_NONE fault for a page on @node. | 1580 | * Got a PROT_NONE fault for a page on @node. |
849 | */ | 1581 | */ |
850 | void task_numa_fault(int node, int pages, bool migrated) | 1582 | void task_numa_fault(int last_cpupid, int node, int pages, int flags) |
851 | { | 1583 | { |
852 | struct task_struct *p = current; | 1584 | struct task_struct *p = current; |
1585 | bool migrated = flags & TNF_MIGRATED; | ||
1586 | int priv; | ||
853 | 1587 | ||
854 | if (!numabalancing_enabled) | 1588 | if (!numabalancing_enabled) |
855 | return; | 1589 | return; |
856 | 1590 | ||
857 | /* FIXME: Allocate task-specific structure for placement policy here */ | 1591 | /* for example, ksmd faulting in a user's mm */ |
1592 | if (!p->mm) | ||
1593 | return; | ||
1594 | |||
1595 | /* Do not worry about placement if exiting */ | ||
1596 | if (p->state == TASK_DEAD) | ||
1597 | return; | ||
1598 | |||
1599 | /* Allocate buffer to track faults on a per-node basis */ | ||
1600 | if (unlikely(!p->numa_faults)) { | ||
1601 | int size = sizeof(*p->numa_faults) * 2 * nr_node_ids; | ||
1602 | |||
1603 | /* numa_faults and numa_faults_buffer share the allocation */ | ||
1604 | p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN); | ||
1605 | if (!p->numa_faults) | ||
1606 | return; | ||
1607 | |||
1608 | BUG_ON(p->numa_faults_buffer); | ||
1609 | p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids); | ||
1610 | p->total_numa_faults = 0; | ||
1611 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); | ||
1612 | } | ||
858 | 1613 | ||
859 | /* | 1614 | /* |
860 | * If pages are properly placed (did not migrate) then scan slower. | 1615 | * First accesses are treated as private, otherwise consider accesses |
861 | * This is reset periodically in case of phase changes | 1616 | * to be private if the accessing pid has not changed |
862 | */ | 1617 | */ |
863 | if (!migrated) | 1618 | if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { |
864 | p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max, | 1619 | priv = 1; |
865 | p->numa_scan_period + jiffies_to_msecs(10)); | 1620 | } else { |
1621 | priv = cpupid_match_pid(p, last_cpupid); | ||
1622 | if (!priv && !(flags & TNF_NO_GROUP)) | ||
1623 | task_numa_group(p, last_cpupid, flags, &priv); | ||
1624 | } | ||
866 | 1625 | ||
867 | task_numa_placement(p); | 1626 | task_numa_placement(p); |
1627 | |||
1628 | /* | ||
1629 | * Retry task to preferred node migration periodically, in case it | ||
1630 | * previously failed, or the scheduler moved us. | ||
1631 | */ | ||
1632 | if (time_after(jiffies, p->numa_migrate_retry)) | ||
1633 | numa_migrate_preferred(p); | ||
1634 | |||
1635 | if (migrated) | ||
1636 | p->numa_pages_migrated += pages; | ||
1637 | |||
1638 | p->numa_faults_buffer[task_faults_idx(node, priv)] += pages; | ||
1639 | p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages; | ||
868 | } | 1640 | } |
869 | 1641 | ||
870 | static void reset_ptenuma_scan(struct task_struct *p) | 1642 | static void reset_ptenuma_scan(struct task_struct *p) |
@@ -884,6 +1656,7 @@ void task_numa_work(struct callback_head *work) | |||
884 | struct mm_struct *mm = p->mm; | 1656 | struct mm_struct *mm = p->mm; |
885 | struct vm_area_struct *vma; | 1657 | struct vm_area_struct *vma; |
886 | unsigned long start, end; | 1658 | unsigned long start, end; |
1659 | unsigned long nr_pte_updates = 0; | ||
887 | long pages; | 1660 | long pages; |
888 | 1661 | ||
889 | WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work)); | 1662 | WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work)); |
@@ -900,35 +1673,9 @@ void task_numa_work(struct callback_head *work) | |||
900 | if (p->flags & PF_EXITING) | 1673 | if (p->flags & PF_EXITING) |
901 | return; | 1674 | return; |
902 | 1675 | ||
903 | /* | 1676 | if (!mm->numa_next_scan) { |
904 | * We do not care about task placement until a task runs on a node | 1677 | mm->numa_next_scan = now + |
905 | * other than the first one used by the address space. This is | 1678 | msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
906 | * largely because migrations are driven by what CPU the task | ||
907 | * is running on. If it's never scheduled on another node, it'll | ||
908 | * not migrate so why bother trapping the fault. | ||
909 | */ | ||
910 | if (mm->first_nid == NUMA_PTE_SCAN_INIT) | ||
911 | mm->first_nid = numa_node_id(); | ||
912 | if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) { | ||
913 | /* Are we running on a new node yet? */ | ||
914 | if (numa_node_id() == mm->first_nid && | ||
915 | !sched_feat_numa(NUMA_FORCE)) | ||
916 | return; | ||
917 | |||
918 | mm->first_nid = NUMA_PTE_SCAN_ACTIVE; | ||
919 | } | ||
920 | |||
921 | /* | ||
922 | * Reset the scan period if enough time has gone by. Objective is that | ||
923 | * scanning will be reduced if pages are properly placed. As tasks | ||
924 | * can enter different phases this needs to be re-examined. Lacking | ||
925 | * proper tracking of reference behaviour, this blunt hammer is used. | ||
926 | */ | ||
927 | migrate = mm->numa_next_reset; | ||
928 | if (time_after(now, migrate)) { | ||
929 | p->numa_scan_period = sysctl_numa_balancing_scan_period_min; | ||
930 | next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset); | ||
931 | xchg(&mm->numa_next_reset, next_scan); | ||
932 | } | 1679 | } |
933 | 1680 | ||
934 | /* | 1681 | /* |
@@ -938,20 +1685,20 @@ void task_numa_work(struct callback_head *work) | |||
938 | if (time_before(now, migrate)) | 1685 | if (time_before(now, migrate)) |
939 | return; | 1686 | return; |
940 | 1687 | ||
941 | if (p->numa_scan_period == 0) | 1688 | if (p->numa_scan_period == 0) { |
942 | p->numa_scan_period = sysctl_numa_balancing_scan_period_min; | 1689 | p->numa_scan_period_max = task_scan_max(p); |
1690 | p->numa_scan_period = task_scan_min(p); | ||
1691 | } | ||
943 | 1692 | ||
944 | next_scan = now + msecs_to_jiffies(p->numa_scan_period); | 1693 | next_scan = now + msecs_to_jiffies(p->numa_scan_period); |
945 | if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) | 1694 | if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) |
946 | return; | 1695 | return; |
947 | 1696 | ||
948 | /* | 1697 | /* |
949 | * Do not set pte_numa if the current running node is rate-limited. | 1698 | * Delay this task enough that another task of this mm will likely win |
950 | * This loses statistics on the fault but if we are unwilling to | 1699 | * the next time around. |
951 | * migrate to this node, it is less likely we can do useful work | ||
952 | */ | 1700 | */ |
953 | if (migrate_ratelimited(numa_node_id())) | 1701 | p->node_stamp += 2 * TICK_NSEC; |
954 | return; | ||
955 | 1702 | ||
956 | start = mm->numa_scan_offset; | 1703 | start = mm->numa_scan_offset; |
957 | pages = sysctl_numa_balancing_scan_size; | 1704 | pages = sysctl_numa_balancing_scan_size; |
@@ -967,18 +1714,32 @@ void task_numa_work(struct callback_head *work) | |||
967 | vma = mm->mmap; | 1714 | vma = mm->mmap; |
968 | } | 1715 | } |
969 | for (; vma; vma = vma->vm_next) { | 1716 | for (; vma; vma = vma->vm_next) { |
970 | if (!vma_migratable(vma)) | 1717 | if (!vma_migratable(vma) || !vma_policy_mof(p, vma)) |
971 | continue; | 1718 | continue; |
972 | 1719 | ||
973 | /* Skip small VMAs. They are not likely to be of relevance */ | 1720 | /* |
974 | if (vma->vm_end - vma->vm_start < HPAGE_SIZE) | 1721 | * Shared library pages mapped by multiple processes are not |
1722 | * migrated as it is expected they are cache replicated. Avoid | ||
1723 | * hinting faults in read-only file-backed mappings or the vdso | ||
1724 | * as migrating the pages will be of marginal benefit. | ||
1725 | */ | ||
1726 | if (!vma->vm_mm || | ||
1727 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) | ||
975 | continue; | 1728 | continue; |
976 | 1729 | ||
977 | do { | 1730 | do { |
978 | start = max(start, vma->vm_start); | 1731 | start = max(start, vma->vm_start); |
979 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); | 1732 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); |
980 | end = min(end, vma->vm_end); | 1733 | end = min(end, vma->vm_end); |
981 | pages -= change_prot_numa(vma, start, end); | 1734 | nr_pte_updates += change_prot_numa(vma, start, end); |
1735 | |||
1736 | /* | ||
1737 | * Scan sysctl_numa_balancing_scan_size but ensure that | ||
1738 | * at least one PTE is updated so that unused virtual | ||
1739 | * address space is quickly skipped. | ||
1740 | */ | ||
1741 | if (nr_pte_updates) | ||
1742 | pages -= (end - start) >> PAGE_SHIFT; | ||
982 | 1743 | ||
983 | start = end; | 1744 | start = end; |
984 | if (pages <= 0) | 1745 | if (pages <= 0) |
@@ -988,10 +1749,10 @@ void task_numa_work(struct callback_head *work) | |||
988 | 1749 | ||
989 | out: | 1750 | out: |
990 | /* | 1751 | /* |
991 | * It is possible to reach the end of the VMA list but the last few VMAs are | 1752 | * It is possible to reach the end of the VMA list but the last few |
992 | * not guaranteed to the vma_migratable. If they are not, we would find the | 1753 | * VMAs are not guaranteed to be vma_migratable. If they are not, we |
993 | * !migratable VMA on the next scan but not reset the scanner to the start | 1754 | * would find the !migratable VMA on the next scan but not reset the |
994 | * so check it now. | 1755 | * scanner to the start so check it now. |
995 | */ | 1756 | */ |
996 | if (vma) | 1757 | if (vma) |
997 | mm->numa_scan_offset = start; | 1758 | mm->numa_scan_offset = start; |
@@ -1025,8 +1786,8 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) | |||
1025 | 1786 | ||
1026 | if (now - curr->node_stamp > period) { | 1787 | if (now - curr->node_stamp > period) { |
1027 | if (!curr->node_stamp) | 1788 | if (!curr->node_stamp) |
1028 | curr->numa_scan_period = sysctl_numa_balancing_scan_period_min; | 1789 | curr->numa_scan_period = task_scan_min(curr); |
1029 | curr->node_stamp = now; | 1790 | curr->node_stamp += period; |
1030 | 1791 | ||
1031 | if (!time_before(jiffies, curr->mm->numa_next_scan)) { | 1792 | if (!time_before(jiffies, curr->mm->numa_next_scan)) { |
1032 | init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ | 1793 | init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ |
@@ -1038,6 +1799,14 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) | |||
1038 | static void task_tick_numa(struct rq *rq, struct task_struct *curr) | 1799 | static void task_tick_numa(struct rq *rq, struct task_struct *curr) |
1039 | { | 1800 | { |
1040 | } | 1801 | } |
1802 | |||
1803 | static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) | ||
1804 | { | ||
1805 | } | ||
1806 | |||
1807 | static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) | ||
1808 | { | ||
1809 | } | ||
1041 | #endif /* CONFIG_NUMA_BALANCING */ | 1810 | #endif /* CONFIG_NUMA_BALANCING */ |
1042 | 1811 | ||
1043 | static void | 1812 | static void |
@@ -1047,8 +1816,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
1047 | if (!parent_entity(se)) | 1816 | if (!parent_entity(se)) |
1048 | update_load_add(&rq_of(cfs_rq)->load, se->load.weight); | 1817 | update_load_add(&rq_of(cfs_rq)->load, se->load.weight); |
1049 | #ifdef CONFIG_SMP | 1818 | #ifdef CONFIG_SMP |
1050 | if (entity_is_task(se)) | 1819 | if (entity_is_task(se)) { |
1051 | list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); | 1820 | struct rq *rq = rq_of(cfs_rq); |
1821 | |||
1822 | account_numa_enqueue(rq, task_of(se)); | ||
1823 | list_add(&se->group_node, &rq->cfs_tasks); | ||
1824 | } | ||
1052 | #endif | 1825 | #endif |
1053 | cfs_rq->nr_running++; | 1826 | cfs_rq->nr_running++; |
1054 | } | 1827 | } |
@@ -1059,8 +1832,10 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
1059 | update_load_sub(&cfs_rq->load, se->load.weight); | 1832 | update_load_sub(&cfs_rq->load, se->load.weight); |
1060 | if (!parent_entity(se)) | 1833 | if (!parent_entity(se)) |
1061 | update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); | 1834 | update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); |
1062 | if (entity_is_task(se)) | 1835 | if (entity_is_task(se)) { |
1836 | account_numa_dequeue(rq_of(cfs_rq), task_of(se)); | ||
1063 | list_del_init(&se->group_node); | 1837 | list_del_init(&se->group_node); |
1838 | } | ||
1064 | cfs_rq->nr_running--; | 1839 | cfs_rq->nr_running--; |
1065 | } | 1840 | } |
1066 | 1841 | ||
@@ -2070,13 +2845,14 @@ static inline bool cfs_bandwidth_used(void) | |||
2070 | return static_key_false(&__cfs_bandwidth_used); | 2845 | return static_key_false(&__cfs_bandwidth_used); |
2071 | } | 2846 | } |
2072 | 2847 | ||
2073 | void account_cfs_bandwidth_used(int enabled, int was_enabled) | 2848 | void cfs_bandwidth_usage_inc(void) |
2074 | { | 2849 | { |
2075 | /* only need to count groups transitioning between enabled/!enabled */ | 2850 | static_key_slow_inc(&__cfs_bandwidth_used); |
2076 | if (enabled && !was_enabled) | 2851 | } |
2077 | static_key_slow_inc(&__cfs_bandwidth_used); | 2852 | |
2078 | else if (!enabled && was_enabled) | 2853 | void cfs_bandwidth_usage_dec(void) |
2079 | static_key_slow_dec(&__cfs_bandwidth_used); | 2854 | { |
2855 | static_key_slow_dec(&__cfs_bandwidth_used); | ||
2080 | } | 2856 | } |
2081 | #else /* HAVE_JUMP_LABEL */ | 2857 | #else /* HAVE_JUMP_LABEL */ |
2082 | static bool cfs_bandwidth_used(void) | 2858 | static bool cfs_bandwidth_used(void) |
@@ -2084,7 +2860,8 @@ static bool cfs_bandwidth_used(void) | |||
2084 | return true; | 2860 | return true; |
2085 | } | 2861 | } |
2086 | 2862 | ||
2087 | void account_cfs_bandwidth_used(int enabled, int was_enabled) {} | 2863 | void cfs_bandwidth_usage_inc(void) {} |
2864 | void cfs_bandwidth_usage_dec(void) {} | ||
2088 | #endif /* HAVE_JUMP_LABEL */ | 2865 | #endif /* HAVE_JUMP_LABEL */ |
2089 | 2866 | ||
2090 | /* | 2867 | /* |
@@ -2335,6 +3112,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) | |||
2335 | cfs_rq->throttled_clock = rq_clock(rq); | 3112 | cfs_rq->throttled_clock = rq_clock(rq); |
2336 | raw_spin_lock(&cfs_b->lock); | 3113 | raw_spin_lock(&cfs_b->lock); |
2337 | list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); | 3114 | list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); |
3115 | if (!cfs_b->timer_active) | ||
3116 | __start_cfs_bandwidth(cfs_b); | ||
2338 | raw_spin_unlock(&cfs_b->lock); | 3117 | raw_spin_unlock(&cfs_b->lock); |
2339 | } | 3118 | } |
2340 | 3119 | ||
@@ -2448,6 +3227,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) | |||
2448 | if (idle) | 3227 | if (idle) |
2449 | goto out_unlock; | 3228 | goto out_unlock; |
2450 | 3229 | ||
3230 | /* | ||
3231 | * if we have relooped after returning idle once, we need to update our | ||
3232 | * status as actually running, so that other cpus doing | ||
3233 | * __start_cfs_bandwidth will stop trying to cancel us. | ||
3234 | */ | ||
3235 | cfs_b->timer_active = 1; | ||
3236 | |||
2451 | __refill_cfs_bandwidth_runtime(cfs_b); | 3237 | __refill_cfs_bandwidth_runtime(cfs_b); |
2452 | 3238 | ||
2453 | if (!throttled) { | 3239 | if (!throttled) { |
@@ -2508,7 +3294,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; | |||
2508 | /* how long we wait to gather additional slack before distributing */ | 3294 | /* how long we wait to gather additional slack before distributing */ |
2509 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; | 3295 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; |
2510 | 3296 | ||
2511 | /* are we near the end of the current quota period? */ | 3297 | /* |
3298 | * Are we near the end of the current quota period? | ||
3299 | * | ||
3300 | * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the | ||
3301 | * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of | ||
3302 | * migrate_hrtimers, base is never cleared, so we are fine. | ||
3303 | */ | ||
2512 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) | 3304 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) |
2513 | { | 3305 | { |
2514 | struct hrtimer *refresh_timer = &cfs_b->period_timer; | 3306 | struct hrtimer *refresh_timer = &cfs_b->period_timer; |
@@ -2584,10 +3376,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) | |||
2584 | u64 expires; | 3376 | u64 expires; |
2585 | 3377 | ||
2586 | /* confirm we're still not at a refresh boundary */ | 3378 | /* confirm we're still not at a refresh boundary */ |
2587 | if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) | 3379 | raw_spin_lock(&cfs_b->lock); |
3380 | if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { | ||
3381 | raw_spin_unlock(&cfs_b->lock); | ||
2588 | return; | 3382 | return; |
3383 | } | ||
2589 | 3384 | ||
2590 | raw_spin_lock(&cfs_b->lock); | ||
2591 | if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { | 3385 | if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { |
2592 | runtime = cfs_b->runtime; | 3386 | runtime = cfs_b->runtime; |
2593 | cfs_b->runtime = 0; | 3387 | cfs_b->runtime = 0; |
@@ -2708,11 +3502,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | |||
2708 | * (timer_active==0 becomes visible before the hrtimer call-back | 3502 | * (timer_active==0 becomes visible before the hrtimer call-back |
2709 | * terminates). In either case we ensure that it's re-programmed | 3503 | * terminates). In either case we ensure that it's re-programmed |
2710 | */ | 3504 | */ |
2711 | while (unlikely(hrtimer_active(&cfs_b->period_timer))) { | 3505 | while (unlikely(hrtimer_active(&cfs_b->period_timer)) && |
3506 | hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) { | ||
3507 | /* bounce the lock to allow do_sched_cfs_period_timer to run */ | ||
2712 | raw_spin_unlock(&cfs_b->lock); | 3508 | raw_spin_unlock(&cfs_b->lock); |
2713 | /* ensure cfs_b->lock is available while we wait */ | 3509 | cpu_relax(); |
2714 | hrtimer_cancel(&cfs_b->period_timer); | ||
2715 | |||
2716 | raw_spin_lock(&cfs_b->lock); | 3510 | raw_spin_lock(&cfs_b->lock); |
2717 | /* if someone else restarted the timer then we're done */ | 3511 | /* if someone else restarted the timer then we're done */ |
2718 | if (cfs_b->timer_active) | 3512 | if (cfs_b->timer_active) |
@@ -3113,7 +3907,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | |||
3113 | { | 3907 | { |
3114 | struct sched_entity *se = tg->se[cpu]; | 3908 | struct sched_entity *se = tg->se[cpu]; |
3115 | 3909 | ||
3116 | if (!tg->parent) /* the trivial, non-cgroup case */ | 3910 | if (!tg->parent || !wl) /* the trivial, non-cgroup case */ |
3117 | return wl; | 3911 | return wl; |
3118 | 3912 | ||
3119 | for_each_sched_entity(se) { | 3913 | for_each_sched_entity(se) { |
@@ -3166,8 +3960,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | |||
3166 | } | 3960 | } |
3167 | #else | 3961 | #else |
3168 | 3962 | ||
3169 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | 3963 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg) |
3170 | unsigned long wl, unsigned long wg) | ||
3171 | { | 3964 | { |
3172 | return wl; | 3965 | return wl; |
3173 | } | 3966 | } |
@@ -3420,11 +4213,10 @@ done: | |||
3420 | * preempt must be disabled. | 4213 | * preempt must be disabled. |
3421 | */ | 4214 | */ |
3422 | static int | 4215 | static int |
3423 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | 4216 | select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) |
3424 | { | 4217 | { |
3425 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | 4218 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; |
3426 | int cpu = smp_processor_id(); | 4219 | int cpu = smp_processor_id(); |
3427 | int prev_cpu = task_cpu(p); | ||
3428 | int new_cpu = cpu; | 4220 | int new_cpu = cpu; |
3429 | int want_affine = 0; | 4221 | int want_affine = 0; |
3430 | int sync = wake_flags & WF_SYNC; | 4222 | int sync = wake_flags & WF_SYNC; |
@@ -3904,9 +4696,12 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp | |||
3904 | 4696 | ||
3905 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; | 4697 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; |
3906 | 4698 | ||
4699 | enum fbq_type { regular, remote, all }; | ||
4700 | |||
3907 | #define LBF_ALL_PINNED 0x01 | 4701 | #define LBF_ALL_PINNED 0x01 |
3908 | #define LBF_NEED_BREAK 0x02 | 4702 | #define LBF_NEED_BREAK 0x02 |
3909 | #define LBF_SOME_PINNED 0x04 | 4703 | #define LBF_DST_PINNED 0x04 |
4704 | #define LBF_SOME_PINNED 0x08 | ||
3910 | 4705 | ||
3911 | struct lb_env { | 4706 | struct lb_env { |
3912 | struct sched_domain *sd; | 4707 | struct sched_domain *sd; |
@@ -3929,6 +4724,8 @@ struct lb_env { | |||
3929 | unsigned int loop; | 4724 | unsigned int loop; |
3930 | unsigned int loop_break; | 4725 | unsigned int loop_break; |
3931 | unsigned int loop_max; | 4726 | unsigned int loop_max; |
4727 | |||
4728 | enum fbq_type fbq_type; | ||
3932 | }; | 4729 | }; |
3933 | 4730 | ||
3934 | /* | 4731 | /* |
@@ -3975,6 +4772,78 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
3975 | return delta < (s64)sysctl_sched_migration_cost; | 4772 | return delta < (s64)sysctl_sched_migration_cost; |
3976 | } | 4773 | } |
3977 | 4774 | ||
4775 | #ifdef CONFIG_NUMA_BALANCING | ||
4776 | /* Returns true if the destination node has incurred more faults */ | ||
4777 | static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env) | ||
4778 | { | ||
4779 | int src_nid, dst_nid; | ||
4780 | |||
4781 | if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults || | ||
4782 | !(env->sd->flags & SD_NUMA)) { | ||
4783 | return false; | ||
4784 | } | ||
4785 | |||
4786 | src_nid = cpu_to_node(env->src_cpu); | ||
4787 | dst_nid = cpu_to_node(env->dst_cpu); | ||
4788 | |||
4789 | if (src_nid == dst_nid) | ||
4790 | return false; | ||
4791 | |||
4792 | /* Always encourage migration to the preferred node. */ | ||
4793 | if (dst_nid == p->numa_preferred_nid) | ||
4794 | return true; | ||
4795 | |||
4796 | /* If both task and group weight improve, this move is a winner. */ | ||
4797 | if (task_weight(p, dst_nid) > task_weight(p, src_nid) && | ||
4798 | group_weight(p, dst_nid) > group_weight(p, src_nid)) | ||
4799 | return true; | ||
4800 | |||
4801 | return false; | ||
4802 | } | ||
4803 | |||
4804 | |||
4805 | static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env) | ||
4806 | { | ||
4807 | int src_nid, dst_nid; | ||
4808 | |||
4809 | if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER)) | ||
4810 | return false; | ||
4811 | |||
4812 | if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) | ||
4813 | return false; | ||
4814 | |||
4815 | src_nid = cpu_to_node(env->src_cpu); | ||
4816 | dst_nid = cpu_to_node(env->dst_cpu); | ||
4817 | |||
4818 | if (src_nid == dst_nid) | ||
4819 | return false; | ||
4820 | |||
4821 | /* Migrating away from the preferred node is always bad. */ | ||
4822 | if (src_nid == p->numa_preferred_nid) | ||
4823 | return true; | ||
4824 | |||
4825 | /* If either task or group weight get worse, don't do it. */ | ||
4826 | if (task_weight(p, dst_nid) < task_weight(p, src_nid) || | ||
4827 | group_weight(p, dst_nid) < group_weight(p, src_nid)) | ||
4828 | return true; | ||
4829 | |||
4830 | return false; | ||
4831 | } | ||
4832 | |||
4833 | #else | ||
4834 | static inline bool migrate_improves_locality(struct task_struct *p, | ||
4835 | struct lb_env *env) | ||
4836 | { | ||
4837 | return false; | ||
4838 | } | ||
4839 | |||
4840 | static inline bool migrate_degrades_locality(struct task_struct *p, | ||
4841 | struct lb_env *env) | ||
4842 | { | ||
4843 | return false; | ||
4844 | } | ||
4845 | #endif | ||
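Taken together, these two helpers bias can_migrate_task(): a migration is encouraged when it reaches the task's preferred node or improves both the task and group weights, and resisted when it would leave the preferred node or make either weight worse. A stand-alone sketch of the comparison logic, with node weights supplied directly instead of being derived from fault statistics:

#include <stdbool.h>
#include <stdio.h>

struct placement {
	int preferred_nid;
	long task_weight[2];	/* per-node per-mille weights, indexed by node id */
	long group_weight[2];
};

static bool improves_locality(const struct placement *p, int src_nid, int dst_nid)
{
	if (src_nid == dst_nid)
		return false;
	if (dst_nid == p->preferred_nid)	/* always encourage moving to the preferred node */
		return true;
	return p->task_weight[dst_nid] > p->task_weight[src_nid] &&
	       p->group_weight[dst_nid] > p->group_weight[src_nid];
}

static bool degrades_locality(const struct placement *p, int src_nid, int dst_nid)
{
	if (src_nid == dst_nid)
		return false;
	if (src_nid == p->preferred_nid)	/* leaving the preferred node is always bad */
		return true;
	return p->task_weight[dst_nid] < p->task_weight[src_nid] ||
	       p->group_weight[dst_nid] < p->group_weight[src_nid];
}

int main(void)
{
	struct placement p = { .preferred_nid = 1,
			       .task_weight  = { 250, 750 },
			       .group_weight = { 400, 600 } };

	printf("0->1 improves: %d, 1->0 degrades: %d\n",
	       improves_locality(&p, 0, 1), degrades_locality(&p, 1, 0));
	return 0;
}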
4846 | |||
3978 | /* | 4847 | /* |
3979 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | 4848 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? |
3980 | */ | 4849 | */ |
@@ -3997,6 +4866,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) | |||
3997 | 4866 | ||
3998 | schedstat_inc(p, se.statistics.nr_failed_migrations_affine); | 4867 | schedstat_inc(p, se.statistics.nr_failed_migrations_affine); |
3999 | 4868 | ||
4869 | env->flags |= LBF_SOME_PINNED; | ||
4870 | |||
4000 | /* | 4871 | /* |
4001 | * Remember if this task can be migrated to any other cpu in | 4872 | * Remember if this task can be migrated to any other cpu in |
4002 | * our sched_group. We may want to revisit it if we couldn't | 4873 | * our sched_group. We may want to revisit it if we couldn't |
@@ -4005,13 +4876,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) | |||
4005 | * Also avoid computing new_dst_cpu if we have already computed | 4876 | * Also avoid computing new_dst_cpu if we have already computed |
4006 | * one in current iteration. | 4877 | * one in current iteration. |
4007 | */ | 4878 | */ |
4008 | if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED)) | 4879 | if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED)) |
4009 | return 0; | 4880 | return 0; |
4010 | 4881 | ||
4011 | /* Prevent to re-select dst_cpu via env's cpus */ | 4882 | /* Prevent to re-select dst_cpu via env's cpus */ |
4012 | for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { | 4883 | for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { |
4013 | if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) { | 4884 | if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) { |
4014 | env->flags |= LBF_SOME_PINNED; | 4885 | env->flags |= LBF_DST_PINNED; |
4015 | env->new_dst_cpu = cpu; | 4886 | env->new_dst_cpu = cpu; |
4016 | break; | 4887 | break; |
4017 | } | 4888 | } |
@@ -4030,11 +4901,24 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) | |||
4030 | 4901 | ||
4031 | /* | 4902 | /* |
4032 | * Aggressive migration if: | 4903 | * Aggressive migration if: |
4033 | * 1) task is cache cold, or | 4904 | * 1) destination numa is preferred |
4034 | * 2) too many balance attempts have failed. | 4905 | * 2) task is cache cold, or |
4906 | * 3) too many balance attempts have failed. | ||
4035 | */ | 4907 | */ |
4036 | |||
4037 | tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd); | 4908 | tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd); |
4909 | if (!tsk_cache_hot) | ||
4910 | tsk_cache_hot = migrate_degrades_locality(p, env); | ||
4911 | |||
4912 | if (migrate_improves_locality(p, env)) { | ||
4913 | #ifdef CONFIG_SCHEDSTATS | ||
4914 | if (tsk_cache_hot) { | ||
4915 | schedstat_inc(env->sd, lb_hot_gained[env->idle]); | ||
4916 | schedstat_inc(p, se.statistics.nr_forced_migrations); | ||
4917 | } | ||
4918 | #endif | ||
4919 | return 1; | ||
4920 | } | ||
4921 | |||
4038 | if (!tsk_cache_hot || | 4922 | if (!tsk_cache_hot || |
4039 | env->sd->nr_balance_failed > env->sd->cache_nice_tries) { | 4923 | env->sd->nr_balance_failed > env->sd->cache_nice_tries) { |
4040 | 4924 | ||
@@ -4077,8 +4961,6 @@ static int move_one_task(struct lb_env *env) | |||
4077 | return 0; | 4961 | return 0; |
4078 | } | 4962 | } |
4079 | 4963 | ||
4080 | static unsigned long task_h_load(struct task_struct *p); | ||
4081 | |||
4082 | static const unsigned int sched_nr_migrate_break = 32; | 4964 | static const unsigned int sched_nr_migrate_break = 32; |
4083 | 4965 | ||
4084 | /* | 4966 | /* |
@@ -4291,6 +5173,10 @@ struct sg_lb_stats { | |||
4291 | unsigned int group_weight; | 5173 | unsigned int group_weight; |
4292 | int group_imb; /* Is there an imbalance in the group ? */ | 5174 | int group_imb; /* Is there an imbalance in the group ? */ |
4293 | int group_has_capacity; /* Is there extra capacity in the group? */ | 5175 | int group_has_capacity; /* Is there extra capacity in the group? */ |
5176 | #ifdef CONFIG_NUMA_BALANCING | ||
5177 | unsigned int nr_numa_running; | ||
5178 | unsigned int nr_preferred_running; | ||
5179 | #endif | ||
4294 | }; | 5180 | }; |
4295 | 5181 | ||
4296 | /* | 5182 | /* |
@@ -4330,7 +5216,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds) | |||
4330 | /** | 5216 | /** |
4331 | * get_sd_load_idx - Obtain the load index for a given sched domain. | 5217 | * get_sd_load_idx - Obtain the load index for a given sched domain. |
4332 | * @sd: The sched_domain whose load_idx is to be obtained. | 5218 | * @sd: The sched_domain whose load_idx is to be obtained. |
4333 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. | 5219 | * @idle: The idle status of the CPU for whose sd load_idx is obtained. |
4334 | * | 5220 | * |
4335 | * Return: The load index. | 5221 | * Return: The load index. |
4336 | */ | 5222 | */ |
@@ -4447,7 +5333,7 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
4447 | { | 5333 | { |
4448 | struct sched_domain *child = sd->child; | 5334 | struct sched_domain *child = sd->child; |
4449 | struct sched_group *group, *sdg = sd->groups; | 5335 | struct sched_group *group, *sdg = sd->groups; |
4450 | unsigned long power; | 5336 | unsigned long power, power_orig; |
4451 | unsigned long interval; | 5337 | unsigned long interval; |
4452 | 5338 | ||
4453 | interval = msecs_to_jiffies(sd->balance_interval); | 5339 | interval = msecs_to_jiffies(sd->balance_interval); |
@@ -4459,7 +5345,7 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
4459 | return; | 5345 | return; |
4460 | } | 5346 | } |
4461 | 5347 | ||
4462 | power = 0; | 5348 | power_orig = power = 0; |
4463 | 5349 | ||
4464 | if (child->flags & SD_OVERLAP) { | 5350 | if (child->flags & SD_OVERLAP) { |
4465 | /* | 5351 | /* |
@@ -4467,8 +5353,12 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
4467 | * span the current group. | 5353 | * span the current group. |
4468 | */ | 5354 | */ |
4469 | 5355 | ||
4470 | for_each_cpu(cpu, sched_group_cpus(sdg)) | 5356 | for_each_cpu(cpu, sched_group_cpus(sdg)) { |
4471 | power += power_of(cpu); | 5357 | struct sched_group *sg = cpu_rq(cpu)->sd->groups; |
5358 | |||
5359 | power_orig += sg->sgp->power_orig; | ||
5360 | power += sg->sgp->power; | ||
5361 | } | ||
4472 | } else { | 5362 | } else { |
4473 | /* | 5363 | /* |
4474 | * !SD_OVERLAP domains can assume that child groups | 5364 | * !SD_OVERLAP domains can assume that child groups |
@@ -4477,12 +5367,14 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
4477 | 5367 | ||
4478 | group = child->groups; | 5368 | group = child->groups; |
4479 | do { | 5369 | do { |
5370 | power_orig += group->sgp->power_orig; | ||
4480 | power += group->sgp->power; | 5371 | power += group->sgp->power; |
4481 | group = group->next; | 5372 | group = group->next; |
4482 | } while (group != child->groups); | 5373 | } while (group != child->groups); |
4483 | } | 5374 | } |
4484 | 5375 | ||
4485 | sdg->sgp->power_orig = sdg->sgp->power = power; | 5376 | sdg->sgp->power_orig = power_orig; |
5377 | sdg->sgp->power = power; | ||
4486 | } | 5378 | } |
4487 | 5379 | ||
4488 | /* | 5380 | /* |
@@ -4526,13 +5418,12 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
4526 | * cpu 3 and leave one of the cpus in the second group unused. | 5418 | * cpu 3 and leave one of the cpus in the second group unused. |
4527 | * | 5419 | * |
4528 | * The current solution to this issue is detecting the skew in the first group | 5420 | * The current solution to this issue is detecting the skew in the first group |
4529 | * by noticing it has a cpu that is overloaded while the remaining cpus are | 5421 | * by noticing the lower domain failed to reach balance and had difficulty |
4530 | * idle -- or rather, there's a distinct imbalance in the cpus; see | 5422 | * moving tasks due to affinity constraints. |
4531 | * sg_imbalanced(). | ||
4532 | * | 5423 | * |
4533 | * When this is so detected; this group becomes a candidate for busiest; see | 5424 | * When this is so detected; this group becomes a candidate for busiest; see |
4534 | * update_sd_pick_busiest(). And calculcate_imbalance() and | 5425 | * update_sd_pick_busiest(). And calculate_imbalance() and |
4535 | * find_busiest_group() avoid some of the usual balance conditional to allow it | 5426 | * find_busiest_group() avoid some of the usual balance conditions to allow it |
4536 | * to create an effective group imbalance. | 5427 | * to create an effective group imbalance. |
4537 | * | 5428 | * |
4538 | * This is a somewhat tricky proposition since the next run might not find the | 5429 | * This is a somewhat tricky proposition since the next run might not find the |
@@ -4540,49 +5431,36 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
4540 | * subtle and fragile situation. | 5431 | * subtle and fragile situation. |
4541 | */ | 5432 | */ |
4542 | 5433 | ||
4543 | struct sg_imb_stats { | 5434 | static inline int sg_imbalanced(struct sched_group *group) |
4544 | unsigned long max_nr_running, min_nr_running; | ||
4545 | unsigned long max_cpu_load, min_cpu_load; | ||
4546 | }; | ||
4547 | |||
4548 | static inline void init_sg_imb_stats(struct sg_imb_stats *sgi) | ||
4549 | { | 5435 | { |
4550 | sgi->max_cpu_load = sgi->max_nr_running = 0UL; | 5436 | return group->sgp->imbalance; |
4551 | sgi->min_cpu_load = sgi->min_nr_running = ~0UL; | ||
4552 | } | 5437 | } |
4553 | 5438 | ||
4554 | static inline void | 5439 | /* |
4555 | update_sg_imb_stats(struct sg_imb_stats *sgi, | 5440 | * Compute the group capacity. |
4556 | unsigned long load, unsigned long nr_running) | 5441 | * |
5442 | * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by | ||
5443 | * first dividing out the smt factor and computing the actual number of cores | ||
5444 | * and limit power unit capacity with that. | ||
5445 | */ | ||
5446 | static inline int sg_capacity(struct lb_env *env, struct sched_group *group) | ||
4557 | { | 5447 | { |
4558 | if (load > sgi->max_cpu_load) | 5448 | unsigned int capacity, smt, cpus; |
4559 | sgi->max_cpu_load = load; | 5449 | unsigned int power, power_orig; |
4560 | if (sgi->min_cpu_load > load) | ||
4561 | sgi->min_cpu_load = load; | ||
4562 | 5450 | ||
4563 | if (nr_running > sgi->max_nr_running) | 5451 | power = group->sgp->power; |
4564 | sgi->max_nr_running = nr_running; | 5452 | power_orig = group->sgp->power_orig; |
4565 | if (sgi->min_nr_running > nr_running) | 5453 | cpus = group->group_weight; |
4566 | sgi->min_nr_running = nr_running; | ||
4567 | } | ||
4568 | 5454 | ||
4569 | static inline int | 5455 | /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */ |
4570 | sg_imbalanced(struct sg_lb_stats *sgs, struct sg_imb_stats *sgi) | 5456 | smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig); |
4571 | { | 5457 | capacity = cpus / smt; /* cores */ |
4572 | /* | ||
4573 | * Consider the group unbalanced when the imbalance is larger | ||
4574 | * than the average weight of a task. | ||
4575 | * | ||
4576 | * APZ: with cgroup the avg task weight can vary wildly and | ||
4577 | * might not be a suitable number - should we keep a | ||
4578 | * normalized nr_running number somewhere that negates | ||
4579 | * the hierarchy? | ||
4580 | */ | ||
4581 | if ((sgi->max_cpu_load - sgi->min_cpu_load) >= sgs->load_per_task && | ||
4582 | (sgi->max_nr_running - sgi->min_nr_running) > 1) | ||
4583 | return 1; | ||
4584 | 5458 | ||
4585 | return 0; | 5459 | capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE)); |
5460 | if (!capacity) | ||
5461 | capacity = fix_small_capacity(env->sd, group); | ||
5462 | |||
5463 | return capacity; | ||
4586 | } | 5464 | } |
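A worked example of the 'phantom core' case the helper above avoids: take a group of 8 SMT siblings (4 cores) whose power_orig is 4712, i.e. about 1178 per core (an assumed figure for illustration, in the spirit of the kernel's SMT gain). Rounding 4712/1024 directly gives 5 task slots, one more than there are cores; deriving smt = ceil(1024 * 8 / 4712) = 2 first yields 8 / 2 = 4 cores, which then caps the power-based estimate. The same arithmetic stand-alone:

#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL			/* assumed default scale */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	/* hypothetical SMT group: 8 siblings, 4 physical cores */
	unsigned long cpus = 8, power_orig = 4712, power = 4712;

	unsigned long naive = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);	 /* 5: phantom core */
	unsigned long smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig); /* 2 siblings/core */
	unsigned long capacity = cpus / smt;					 /* 4 real cores */

	if (capacity > naive)		/* min_t(), as in sg_capacity() */
		capacity = naive;

	printf("naive=%lu capacity=%lu\n", naive, capacity);
	return 0;
}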
4587 | 5465 | ||
4588 | /** | 5466 | /** |
@@ -4597,12 +5475,11 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4597 | struct sched_group *group, int load_idx, | 5475 | struct sched_group *group, int load_idx, |
4598 | int local_group, struct sg_lb_stats *sgs) | 5476 | int local_group, struct sg_lb_stats *sgs) |
4599 | { | 5477 | { |
4600 | struct sg_imb_stats sgi; | ||
4601 | unsigned long nr_running; | 5478 | unsigned long nr_running; |
4602 | unsigned long load; | 5479 | unsigned long load; |
4603 | int i; | 5480 | int i; |
4604 | 5481 | ||
4605 | init_sg_imb_stats(&sgi); | 5482 | memset(sgs, 0, sizeof(*sgs)); |
4606 | 5483 | ||
4607 | for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { | 5484 | for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { |
4608 | struct rq *rq = cpu_rq(i); | 5485 | struct rq *rq = cpu_rq(i); |
@@ -4610,24 +5487,22 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4610 | nr_running = rq->nr_running; | 5487 | nr_running = rq->nr_running; |
4611 | 5488 | ||
4612 | /* Bias balancing toward cpus of our domain */ | 5489 | /* Bias balancing toward cpus of our domain */ |
4613 | if (local_group) { | 5490 | if (local_group) |
4614 | load = target_load(i, load_idx); | 5491 | load = target_load(i, load_idx); |
4615 | } else { | 5492 | else |
4616 | load = source_load(i, load_idx); | 5493 | load = source_load(i, load_idx); |
4617 | update_sg_imb_stats(&sgi, load, nr_running); | ||
4618 | } | ||
4619 | 5494 | ||
4620 | sgs->group_load += load; | 5495 | sgs->group_load += load; |
4621 | sgs->sum_nr_running += nr_running; | 5496 | sgs->sum_nr_running += nr_running; |
5497 | #ifdef CONFIG_NUMA_BALANCING | ||
5498 | sgs->nr_numa_running += rq->nr_numa_running; | ||
5499 | sgs->nr_preferred_running += rq->nr_preferred_running; | ||
5500 | #endif | ||
4622 | sgs->sum_weighted_load += weighted_cpuload(i); | 5501 | sgs->sum_weighted_load += weighted_cpuload(i); |
4623 | if (idle_cpu(i)) | 5502 | if (idle_cpu(i)) |
4624 | sgs->idle_cpus++; | 5503 | sgs->idle_cpus++; |
4625 | } | 5504 | } |
4626 | 5505 | ||
4627 | if (local_group && (env->idle != CPU_NEWLY_IDLE || | ||
4628 | time_after_eq(jiffies, group->sgp->next_update))) | ||
4629 | update_group_power(env->sd, env->dst_cpu); | ||
4630 | |||
4631 | /* Adjust by relative CPU power of the group */ | 5506 | /* Adjust by relative CPU power of the group */ |
4632 | sgs->group_power = group->sgp->power; | 5507 | sgs->group_power = group->sgp->power; |
4633 | sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power; | 5508 | sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power; |
@@ -4635,16 +5510,11 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4635 | if (sgs->sum_nr_running) | 5510 | if (sgs->sum_nr_running) |
4636 | sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | 5511 | sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; |
4637 | 5512 | ||
4638 | sgs->group_imb = sg_imbalanced(sgs, &sgi); | ||
4639 | |||
4640 | sgs->group_capacity = | ||
4641 | DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE); | ||
4642 | |||
4643 | if (!sgs->group_capacity) | ||
4644 | sgs->group_capacity = fix_small_capacity(env->sd, group); | ||
4645 | |||
4646 | sgs->group_weight = group->group_weight; | 5513 | sgs->group_weight = group->group_weight; |
4647 | 5514 | ||
5515 | sgs->group_imb = sg_imbalanced(group); | ||
5516 | sgs->group_capacity = sg_capacity(env, group); | ||
5517 | |||
4648 | if (sgs->group_capacity > sgs->sum_nr_running) | 5518 | if (sgs->group_capacity > sgs->sum_nr_running) |
4649 | sgs->group_has_capacity = 1; | 5519 | sgs->group_has_capacity = 1; |
4650 | } | 5520 | } |
@@ -4693,14 +5563,42 @@ static bool update_sd_pick_busiest(struct lb_env *env, | |||
4693 | return false; | 5563 | return false; |
4694 | } | 5564 | } |
4695 | 5565 | ||
5566 | #ifdef CONFIG_NUMA_BALANCING | ||
5567 | static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) | ||
5568 | { | ||
5569 | if (sgs->sum_nr_running > sgs->nr_numa_running) | ||
5570 | return regular; | ||
5571 | if (sgs->sum_nr_running > sgs->nr_preferred_running) | ||
5572 | return remote; | ||
5573 | return all; | ||
5574 | } | ||
5575 | |||
5576 | static inline enum fbq_type fbq_classify_rq(struct rq *rq) | ||
5577 | { | ||
5578 | if (rq->nr_running > rq->nr_numa_running) | ||
5579 | return regular; | ||
5580 | if (rq->nr_running > rq->nr_preferred_running) | ||
5581 | return remote; | ||
5582 | return all; | ||
5583 | } | ||
5584 | #else | ||
5585 | static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) | ||
5586 | { | ||
5587 | return all; | ||
5588 | } | ||
5589 | |||
5590 | static inline enum fbq_type fbq_classify_rq(struct rq *rq) | ||
5591 | { | ||
5592 | return regular; | ||
5593 | } | ||
5594 | #endif /* CONFIG_NUMA_BALANCING */ | ||
5595 | |||
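
The classifiers above only compare the counters that update_sg_lb_stats() (and the per-rq fields) accumulate. A minimal userspace sketch of the same decision; the struct and the sample numbers are invented, and the enum ordering regular < remote < all is assumed because the fbq_type filter further down relies on it.

#include <stdio.h>

enum fbq_type { regular, remote, all };   /* assumed ordering */

struct stats {
    unsigned int sum_nr_running;
    unsigned int nr_numa_running;
    unsigned int nr_preferred_running;
};

static enum fbq_type classify(const struct stats *s)
{
    if (s->sum_nr_running > s->nr_numa_running)
        return regular;   /* some tasks are not NUMA tasks at all */
    if (s->sum_nr_running > s->nr_preferred_running)
        return remote;    /* only NUMA tasks, some on the wrong node */
    return all;           /* every NUMA task already sits on its preferred node */
}

int main(void)
{
    struct stats mixed = { 4, 2, 2 };   /* two plain tasks  -> regular */
    struct stats wrong = { 3, 3, 1 };   /* two misplaced    -> remote  */
    struct stats home  = { 3, 3, 3 };   /* all well placed  -> all     */

    printf("%d %d %d\n", classify(&mixed), classify(&wrong), classify(&home));
    return 0;
}
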
4696 | /** | 5596 | /** |
4697 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | 5597 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
4698 | * @env: The load balancing environment. | 5598 | * @env: The load balancing environment. |
4699 | * @balance: Should we balance. | ||
4700 | * @sds: variable to hold the statistics for this sched_domain. | 5599 | * @sds: variable to hold the statistics for this sched_domain. |
4701 | */ | 5600 | */ |
4702 | static inline void update_sd_lb_stats(struct lb_env *env, | 5601 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) |
4703 | struct sd_lb_stats *sds) | ||
4704 | { | 5602 | { |
4705 | struct sched_domain *child = env->sd->child; | 5603 | struct sched_domain *child = env->sd->child; |
4706 | struct sched_group *sg = env->sd->groups; | 5604 | struct sched_group *sg = env->sd->groups; |
@@ -4720,11 +5618,17 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
4720 | if (local_group) { | 5618 | if (local_group) { |
4721 | sds->local = sg; | 5619 | sds->local = sg; |
4722 | sgs = &sds->local_stat; | 5620 | sgs = &sds->local_stat; |
5621 | |||
5622 | if (env->idle != CPU_NEWLY_IDLE || | ||
5623 | time_after_eq(jiffies, sg->sgp->next_update)) | ||
5624 | update_group_power(env->sd, env->dst_cpu); | ||
4723 | } | 5625 | } |
4724 | 5626 | ||
4725 | memset(sgs, 0, sizeof(*sgs)); | ||
4726 | update_sg_lb_stats(env, sg, load_idx, local_group, sgs); | 5627 | update_sg_lb_stats(env, sg, load_idx, local_group, sgs); |
4727 | 5628 | ||
5629 | if (local_group) | ||
5630 | goto next_group; | ||
5631 | |||
4728 | /* | 5632 | /* |
4729 | * In case the child domain prefers tasks go to siblings | 5633 | * In case the child domain prefers tasks go to siblings |
4730 | * first, lower the sg capacity to one so that we'll try | 5634 | * first, lower the sg capacity to one so that we'll try |
@@ -4735,21 +5639,25 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
4735 | * heaviest group when it is already under-utilized (possible | 5639 | * heaviest group when it is already under-utilized (possible |
4736 | * when a large weight task outweighs the tasks on the system). | 5640 | * when a large weight task outweighs the tasks on the system). |
4737 | */ | 5641 | */ |
4738 | if (prefer_sibling && !local_group && | 5642 | if (prefer_sibling && sds->local && |
4739 | sds->local && sds->local_stat.group_has_capacity) | 5643 | sds->local_stat.group_has_capacity) |
4740 | sgs->group_capacity = min(sgs->group_capacity, 1U); | 5644 | sgs->group_capacity = min(sgs->group_capacity, 1U); |
4741 | 5645 | ||
4742 | /* Now, start updating sd_lb_stats */ | 5646 | if (update_sd_pick_busiest(env, sds, sg, sgs)) { |
4743 | sds->total_load += sgs->group_load; | ||
4744 | sds->total_pwr += sgs->group_power; | ||
4745 | |||
4746 | if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) { | ||
4747 | sds->busiest = sg; | 5647 | sds->busiest = sg; |
4748 | sds->busiest_stat = *sgs; | 5648 | sds->busiest_stat = *sgs; |
4749 | } | 5649 | } |
4750 | 5650 | ||
5651 | next_group: | ||
5652 | /* Now, start updating sd_lb_stats */ | ||
5653 | sds->total_load += sgs->group_load; | ||
5654 | sds->total_pwr += sgs->group_power; | ||
5655 | |||
4751 | sg = sg->next; | 5656 | sg = sg->next; |
4752 | } while (sg != env->sd->groups); | 5657 | } while (sg != env->sd->groups); |
5658 | |||
5659 | if (env->sd->flags & SD_NUMA) | ||
5660 | env->fbq_type = fbq_classify_group(&sds->busiest_stat); | ||
4753 | } | 5661 | } |
4754 | 5662 | ||
4755 | /** | 5663 | /** |
@@ -5053,15 +5961,39 @@ static struct rq *find_busiest_queue(struct lb_env *env, | |||
5053 | int i; | 5961 | int i; |
5054 | 5962 | ||
5055 | for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { | 5963 | for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { |
5056 | unsigned long power = power_of(i); | 5964 | unsigned long power, capacity, wl; |
5057 | unsigned long capacity = DIV_ROUND_CLOSEST(power, | 5965 | enum fbq_type rt; |
5058 | SCHED_POWER_SCALE); | ||
5059 | unsigned long wl; | ||
5060 | 5966 | ||
5967 | rq = cpu_rq(i); | ||
5968 | rt = fbq_classify_rq(rq); | ||
5969 | |||
5970 | /* | ||
5971 | * We classify groups/runqueues into three groups: | ||
5972 | * - regular: there are !numa tasks | ||
5973 | * - remote: there are numa tasks that run on the 'wrong' node | ||
5974 | * - all: there is no distinction | ||
5975 | * | ||
5976 | * In order to avoid migrating ideally placed numa tasks, | ||
5977 | * ignore those when there are better options. | ||
5978 | * | ||
5979 | * If we ignore the actual busiest queue to migrate another | ||
5980 | * task, the next balance pass can still reduce the busiest | ||
5981 | * queue by moving tasks around inside the node. | ||
5982 | * | ||
5983 | * If we cannot move enough load due to this classification | ||
5984 | * the next pass will adjust the group classification and | ||
5985 | * allow migration of more tasks. | ||
5986 | * | ||
5987 | * Both cases only affect the total convergence complexity. | ||
5988 | */ | ||
5989 | if (rt > env->fbq_type) | ||
5990 | continue; | ||
5991 | |||
5992 | power = power_of(i); | ||
5993 | capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); | ||
5061 | if (!capacity) | 5994 | if (!capacity) |
5062 | capacity = fix_small_capacity(env->sd, group); | 5995 | capacity = fix_small_capacity(env->sd, group); |
5063 | 5996 | ||
5064 | rq = cpu_rq(i); | ||
5065 | wl = weighted_cpuload(i); | 5997 | wl = weighted_cpuload(i); |
5066 | 5998 | ||
5067 | /* | 5999 | /* |
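
Since env->fbq_type is the classification of the busiest group (set at the end of update_sd_lb_stats() above) and the enum is assumed to be ordered regular < remote < all, the rt > env->fbq_type test skips only runqueues whose tasks are better placed than the group as a whole, leaving well-placed NUMA tasks alone while other candidates exist. A tiny standalone check of that filter:

#include <stdio.h>

enum fbq_type { regular, remote, all };   /* assumed ordering */

int main(void)
{
    const char *name[] = { "regular", "remote", "all" };
    enum fbq_type env_type = remote;   /* busiest group still has misplaced NUMA tasks */
    int rq_type;

    for (rq_type = regular; rq_type <= all; rq_type++)
        printf("rq=%-7s -> %s\n", name[rq_type],
               rq_type > env_type ? "skip" : "consider");
    return 0;
}
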
@@ -5164,6 +6096,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
5164 | int *continue_balancing) | 6096 | int *continue_balancing) |
5165 | { | 6097 | { |
5166 | int ld_moved, cur_ld_moved, active_balance = 0; | 6098 | int ld_moved, cur_ld_moved, active_balance = 0; |
6099 | struct sched_domain *sd_parent = sd->parent; | ||
5167 | struct sched_group *group; | 6100 | struct sched_group *group; |
5168 | struct rq *busiest; | 6101 | struct rq *busiest; |
5169 | unsigned long flags; | 6102 | unsigned long flags; |
@@ -5177,6 +6110,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
5177 | .idle = idle, | 6110 | .idle = idle, |
5178 | .loop_break = sched_nr_migrate_break, | 6111 | .loop_break = sched_nr_migrate_break, |
5179 | .cpus = cpus, | 6112 | .cpus = cpus, |
6113 | .fbq_type = all, | ||
5180 | }; | 6114 | }; |
5181 | 6115 | ||
5182 | /* | 6116 | /* |
@@ -5268,17 +6202,17 @@ more_balance: | |||
5268 | * moreover subsequent load balance cycles should correct the | 6202 | * moreover subsequent load balance cycles should correct the |
5269 | * excess load moved. | 6203 | * excess load moved. |
5270 | */ | 6204 | */ |
5271 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) { | 6205 | if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { |
6206 | |||
6207 | /* Prevent re-selecting dst_cpu via env's cpus */ | ||
6208 | cpumask_clear_cpu(env.dst_cpu, env.cpus); | ||
5272 | 6209 | ||
5273 | env.dst_rq = cpu_rq(env.new_dst_cpu); | 6210 | env.dst_rq = cpu_rq(env.new_dst_cpu); |
5274 | env.dst_cpu = env.new_dst_cpu; | 6211 | env.dst_cpu = env.new_dst_cpu; |
5275 | env.flags &= ~LBF_SOME_PINNED; | 6212 | env.flags &= ~LBF_DST_PINNED; |
5276 | env.loop = 0; | 6213 | env.loop = 0; |
5277 | env.loop_break = sched_nr_migrate_break; | 6214 | env.loop_break = sched_nr_migrate_break; |
5278 | 6215 | ||
5279 | /* Prevent to re-select dst_cpu via env's cpus */ | ||
5280 | cpumask_clear_cpu(env.dst_cpu, env.cpus); | ||
5281 | |||
5282 | /* | 6216 | /* |
5283 | * Go back to "more_balance" rather than "redo" since we | 6217 | * Go back to "more_balance" rather than "redo" since we |
5284 | * need to continue with same src_cpu. | 6218 | * need to continue with same src_cpu. |
@@ -5286,6 +6220,18 @@ more_balance: | |||
5286 | goto more_balance; | 6220 | goto more_balance; |
5287 | } | 6221 | } |
5288 | 6222 | ||
6223 | /* | ||
6224 | * We failed to reach balance because of affinity. | ||
6225 | */ | ||
6226 | if (sd_parent) { | ||
6227 | int *group_imbalance = &sd_parent->groups->sgp->imbalance; | ||
6228 | |||
6229 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) { | ||
6230 | *group_imbalance = 1; | ||
6231 | } else if (*group_imbalance) | ||
6232 | *group_imbalance = 0; | ||
6233 | } | ||
6234 | |||
5289 | /* All tasks on this runqueue were pinned by CPU affinity */ | 6235 | /* All tasks on this runqueue were pinned by CPU affinity */ |
5290 | if (unlikely(env.flags & LBF_ALL_PINNED)) { | 6236 | if (unlikely(env.flags & LBF_ALL_PINNED)) { |
5291 | cpumask_clear_cpu(cpu_of(busiest), cpus); | 6237 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
@@ -5393,6 +6339,7 @@ void idle_balance(int this_cpu, struct rq *this_rq) | |||
5393 | struct sched_domain *sd; | 6339 | struct sched_domain *sd; |
5394 | int pulled_task = 0; | 6340 | int pulled_task = 0; |
5395 | unsigned long next_balance = jiffies + HZ; | 6341 | unsigned long next_balance = jiffies + HZ; |
6342 | u64 curr_cost = 0; | ||
5396 | 6343 | ||
5397 | this_rq->idle_stamp = rq_clock(this_rq); | 6344 | this_rq->idle_stamp = rq_clock(this_rq); |
5398 | 6345 | ||
@@ -5409,15 +6356,27 @@ void idle_balance(int this_cpu, struct rq *this_rq) | |||
5409 | for_each_domain(this_cpu, sd) { | 6356 | for_each_domain(this_cpu, sd) { |
5410 | unsigned long interval; | 6357 | unsigned long interval; |
5411 | int continue_balancing = 1; | 6358 | int continue_balancing = 1; |
6359 | u64 t0, domain_cost; | ||
5412 | 6360 | ||
5413 | if (!(sd->flags & SD_LOAD_BALANCE)) | 6361 | if (!(sd->flags & SD_LOAD_BALANCE)) |
5414 | continue; | 6362 | continue; |
5415 | 6363 | ||
6364 | if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) | ||
6365 | break; | ||
6366 | |||
5416 | if (sd->flags & SD_BALANCE_NEWIDLE) { | 6367 | if (sd->flags & SD_BALANCE_NEWIDLE) { |
6368 | t0 = sched_clock_cpu(this_cpu); | ||
6369 | |||
5417 | /* If we've pulled tasks over stop searching: */ | 6370 | /* If we've pulled tasks over stop searching: */ |
5418 | pulled_task = load_balance(this_cpu, this_rq, | 6371 | pulled_task = load_balance(this_cpu, this_rq, |
5419 | sd, CPU_NEWLY_IDLE, | 6372 | sd, CPU_NEWLY_IDLE, |
5420 | &continue_balancing); | 6373 | &continue_balancing); |
6374 | |||
6375 | domain_cost = sched_clock_cpu(this_cpu) - t0; | ||
6376 | if (domain_cost > sd->max_newidle_lb_cost) | ||
6377 | sd->max_newidle_lb_cost = domain_cost; | ||
6378 | |||
6379 | curr_cost += domain_cost; | ||
5421 | } | 6380 | } |
5422 | 6381 | ||
5423 | interval = msecs_to_jiffies(sd->balance_interval); | 6382 | interval = msecs_to_jiffies(sd->balance_interval); |
@@ -5439,6 +6398,9 @@ void idle_balance(int this_cpu, struct rq *this_rq) | |||
5439 | */ | 6398 | */ |
5440 | this_rq->next_balance = next_balance; | 6399 | this_rq->next_balance = next_balance; |
5441 | } | 6400 | } |
6401 | |||
6402 | if (curr_cost > this_rq->max_idle_balance_cost) | ||
6403 | this_rq->max_idle_balance_cost = curr_cost; | ||
5442 | } | 6404 | } |
5443 | 6405 | ||
5444 | /* | 6406 | /* |
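
The curr_cost bookkeeping added to idle_balance() lets the newidle pass stop as soon as the historic cost of balancing the next domain would exceed the time this CPU expects to stay idle. A standalone sketch with invented nanosecond figures (in the kernel both avg_idle and the costs come from sched_clock_cpu()-based accounting):

#include <stdio.h>

int main(void)
{
    /* Hypothetical per-domain worst-case newidle balance costs, in ns. */
    unsigned long long max_newidle_lb_cost[] = { 20000, 150000, 900000 };
    unsigned long long avg_idle = 200000;   /* expected idle period, ns */
    unsigned long long curr_cost = 0;
    int i;

    for (i = 0; i < 3; i++) {
        if (avg_idle < curr_cost + max_newidle_lb_cost[i]) {
            printf("stop before domain %d\n", i);
            break;
        }
        /* pretend this pass cost exactly its historic maximum */
        curr_cost += max_newidle_lb_cost[i];
        printf("balanced domain %d, curr_cost=%llu\n", i, curr_cost);
    }
    return 0;
}
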
@@ -5662,15 +6624,39 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
5662 | /* Earliest time when we have to do rebalance again */ | 6624 | /* Earliest time when we have to do rebalance again */ |
5663 | unsigned long next_balance = jiffies + 60*HZ; | 6625 | unsigned long next_balance = jiffies + 60*HZ; |
5664 | int update_next_balance = 0; | 6626 | int update_next_balance = 0; |
5665 | int need_serialize; | 6627 | int need_serialize, need_decay = 0; |
6628 | u64 max_cost = 0; | ||
5666 | 6629 | ||
5667 | update_blocked_averages(cpu); | 6630 | update_blocked_averages(cpu); |
5668 | 6631 | ||
5669 | rcu_read_lock(); | 6632 | rcu_read_lock(); |
5670 | for_each_domain(cpu, sd) { | 6633 | for_each_domain(cpu, sd) { |
6634 | /* | ||
6635 | * Decay the newidle max times here because this is a regular | ||
6636 | * visit to all the domains. Decay ~1% per second. | ||
6637 | */ | ||
6638 | if (time_after(jiffies, sd->next_decay_max_lb_cost)) { | ||
6639 | sd->max_newidle_lb_cost = | ||
6640 | (sd->max_newidle_lb_cost * 253) / 256; | ||
6641 | sd->next_decay_max_lb_cost = jiffies + HZ; | ||
6642 | need_decay = 1; | ||
6643 | } | ||
6644 | max_cost += sd->max_newidle_lb_cost; | ||
6645 | |||
5671 | if (!(sd->flags & SD_LOAD_BALANCE)) | 6646 | if (!(sd->flags & SD_LOAD_BALANCE)) |
5672 | continue; | 6647 | continue; |
5673 | 6648 | ||
6649 | /* | ||
6650 | * Stop the load balance at this level. There is another | ||
6651 | * CPU in our sched group which is doing load balancing more | ||
6652 | * actively. | ||
6653 | */ | ||
6654 | if (!continue_balancing) { | ||
6655 | if (need_decay) | ||
6656 | continue; | ||
6657 | break; | ||
6658 | } | ||
6659 | |||
5674 | interval = sd->balance_interval; | 6660 | interval = sd->balance_interval; |
5675 | if (idle != CPU_IDLE) | 6661 | if (idle != CPU_IDLE) |
5676 | interval *= sd->busy_factor; | 6662 | interval *= sd->busy_factor; |
@@ -5689,7 +6675,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
5689 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 6675 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
5690 | if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { | 6676 | if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { |
5691 | /* | 6677 | /* |
5692 | * The LBF_SOME_PINNED logic could have changed | 6678 | * The LBF_DST_PINNED logic could have changed |
5693 | * env->dst_cpu, so we can't know our idle | 6679 | * env->dst_cpu, so we can't know our idle |
5694 | * state even if we migrated tasks. Update it. | 6680 | * state even if we migrated tasks. Update it. |
5695 | */ | 6681 | */ |
@@ -5704,14 +6690,14 @@ out: | |||
5704 | next_balance = sd->last_balance + interval; | 6690 | next_balance = sd->last_balance + interval; |
5705 | update_next_balance = 1; | 6691 | update_next_balance = 1; |
5706 | } | 6692 | } |
5707 | 6693 | } | |
6694 | if (need_decay) { | ||
5708 | /* | 6695 | /* |
5709 | * Stop the load balance at this level. There is another | 6696 | * Ensure the rq-wide value also decays but keep it at a |
5710 | * CPU in our sched group which is doing load balancing more | 6697 | * reasonable floor to avoid funnies with rq->avg_idle. |
5711 | * actively. | ||
5712 | */ | 6698 | */ |
5713 | if (!continue_balancing) | 6699 | rq->max_idle_balance_cost = |
5714 | break; | 6700 | max((u64)sysctl_sched_migration_cost, max_cost); |
5715 | } | 6701 | } |
5716 | rcu_read_unlock(); | 6702 | rcu_read_unlock(); |
5717 | 6703 | ||
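
The 253/256 factor is roughly a 1.2% reduction per step, and next_decay_max_lb_cost re-arms once per second, which is where the "~1% per second" comment comes from. A quick standalone check of how fast a recorded maximum fades under that rule (the starting value is invented):

#include <stdio.h>

int main(void)
{
    unsigned long long cost = 1000000;   /* 1 ms recorded as max_newidle_lb_cost */
    int sec;

    for (sec = 0; sec < 60; sec++)
        cost = (cost * 253) / 256;

    printf("after 60s: %llu ns\n", cost);   /* roughly half the original value */
    return 0;
}

So a single unusually expensive newidle pass stops inflating the cutoff within a minute or two.
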
@@ -6214,7 +7200,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, | |||
6214 | se->cfs_rq = parent->my_q; | 7200 | se->cfs_rq = parent->my_q; |
6215 | 7201 | ||
6216 | se->my_q = cfs_rq; | 7202 | se->my_q = cfs_rq; |
6217 | update_load_set(&se->load, 0); | 7203 | /* guarantee group entities always have weight */ |
7204 | update_load_set(&se->load, NICE_0_LOAD); | ||
6218 | se->parent = parent; | 7205 | se->parent = parent; |
6219 | } | 7206 | } |
6220 | 7207 | ||
diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 99399f8e4799..5716929a2e3a 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h | |||
@@ -63,10 +63,23 @@ SCHED_FEAT(LB_MIN, false) | |||
63 | /* | 63 | /* |
64 | * Apply the automatic NUMA scheduling policy. Enabled automatically | 64 | * Apply the automatic NUMA scheduling policy. Enabled automatically |
65 | * at runtime if running on a NUMA machine. Can be controlled via | 65 | * at runtime if running on a NUMA machine. Can be controlled via |
66 | * numa_balancing=. Allow PTE scanning to be forced on UMA machines | 66 | * numa_balancing= |
67 | * for debugging the core machinery. | ||
68 | */ | 67 | */ |
69 | #ifdef CONFIG_NUMA_BALANCING | 68 | #ifdef CONFIG_NUMA_BALANCING |
70 | SCHED_FEAT(NUMA, false) | 69 | SCHED_FEAT(NUMA, false) |
71 | SCHED_FEAT(NUMA_FORCE, false) | 70 | |
71 | /* | ||
72 | * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a | ||
73 | * higher number of hinting faults are recorded during active load | ||
74 | * balancing. | ||
75 | */ | ||
76 | SCHED_FEAT(NUMA_FAVOUR_HIGHER, true) | ||
77 | |||
78 | /* | ||
79 | * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a | ||
80 | * lower number of hinting faults have been recorded. As this has | ||
81 | * the potential to prevent a task ever migrating to a new node | ||
82 | * due to CPU overload, it is disabled by default. | ||
83 | */ | ||
84 | SCHED_FEAT(NUMA_RESIST_LOWER, false) | ||
72 | #endif | 85 | #endif |
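
Both toggles are consumed through the usual sched_feat() bit test. The snippet below is not the fair.c call site; it is a userspace model of the SCHED_FEAT pattern (a feature list expanded into an enum plus a default bitmask), with a hypothetical "does this move improve locality" check bolted on to show how such a feature typically gates a decision.

#include <stdbool.h>
#include <stdio.h>

/* One list, expanded once into an enum and once into default bits. */
#define SCHED_FEAT_LIST(F)                 \
    F(NUMA,                false)          \
    F(NUMA_FAVOUR_HIGHER,  true)           \
    F(NUMA_RESIST_LOWER,   false)

#define F_ENUM(name, enabled)  __SCHED_FEAT_##name,
enum { SCHED_FEAT_LIST(F_ENUM) __SCHED_FEAT_NR };

#define F_BIT(name, enabled)   ((enabled) ? 1u << __SCHED_FEAT_##name : 0u) |
static unsigned int sysctl_sched_features = SCHED_FEAT_LIST(F_BIT) 0u;

#define sched_feat(x)  (!!(sysctl_sched_features & (1u << __SCHED_FEAT_##x)))

int main(void)
{
    /* Hypothetical check: favour pulling a task toward more hinting faults. */
    int faults_dst = 40, faults_src = 10;
    bool improves = sched_feat(NUMA_FAVOUR_HIGHER) && faults_dst > faults_src;

    printf("FAVOUR_HIGHER=%d RESIST_LOWER=%d improves=%d\n",
           sched_feat(NUMA_FAVOUR_HIGHER), sched_feat(NUMA_RESIST_LOWER), improves);
    return 0;
}
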
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index d8da01008d39..516c3d9ceea1 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c | |||
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | #ifdef CONFIG_SMP | 10 | #ifdef CONFIG_SMP |
11 | static int | 11 | static int |
12 | select_task_rq_idle(struct task_struct *p, int sd_flag, int flags) | 12 | select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) |
13 | { | 13 | { |
14 | return task_cpu(p); /* IDLE tasks are never migrated */ | 14 | return task_cpu(p); /* IDLE tasks are never migrated */ |
15 | } | 15 | } |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 01970c8e64df..7d57275fc396 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq) | |||
246 | * if we should look at the mask. It would be a shame | 246 | * if we should look at the mask. It would be a shame |
247 | * if we looked at the mask, but the mask was not | 247 | * if we looked at the mask, but the mask was not |
248 | * updated yet. | 248 | * updated yet. |
249 | * | ||
250 | * Matched by the barrier in pull_rt_task(). | ||
249 | */ | 251 | */ |
250 | wmb(); | 252 | smp_wmb(); |
251 | atomic_inc(&rq->rd->rto_count); | 253 | atomic_inc(&rq->rd->rto_count); |
252 | } | 254 | } |
253 | 255 | ||
@@ -1169,13 +1171,10 @@ static void yield_task_rt(struct rq *rq) | |||
1169 | static int find_lowest_rq(struct task_struct *task); | 1171 | static int find_lowest_rq(struct task_struct *task); |
1170 | 1172 | ||
1171 | static int | 1173 | static int |
1172 | select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | 1174 | select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) |
1173 | { | 1175 | { |
1174 | struct task_struct *curr; | 1176 | struct task_struct *curr; |
1175 | struct rq *rq; | 1177 | struct rq *rq; |
1176 | int cpu; | ||
1177 | |||
1178 | cpu = task_cpu(p); | ||
1179 | 1178 | ||
1180 | if (p->nr_cpus_allowed == 1) | 1179 | if (p->nr_cpus_allowed == 1) |
1181 | goto out; | 1180 | goto out; |
@@ -1213,8 +1212,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
1213 | */ | 1212 | */ |
1214 | if (curr && unlikely(rt_task(curr)) && | 1213 | if (curr && unlikely(rt_task(curr)) && |
1215 | (curr->nr_cpus_allowed < 2 || | 1214 | (curr->nr_cpus_allowed < 2 || |
1216 | curr->prio <= p->prio) && | 1215 | curr->prio <= p->prio)) { |
1217 | (p->nr_cpus_allowed > 1)) { | ||
1218 | int target = find_lowest_rq(p); | 1216 | int target = find_lowest_rq(p); |
1219 | 1217 | ||
1220 | if (target != -1) | 1218 | if (target != -1) |
@@ -1630,6 +1628,12 @@ static int pull_rt_task(struct rq *this_rq) | |||
1630 | if (likely(!rt_overloaded(this_rq))) | 1628 | if (likely(!rt_overloaded(this_rq))) |
1631 | return 0; | 1629 | return 0; |
1632 | 1630 | ||
1631 | /* | ||
1632 | * Match the barrier from rt_set_overload(); this guarantees that if we | ||
1633 | * see overloaded we must also see the rto_mask bit. | ||
1634 | */ | ||
1635 | smp_rmb(); | ||
1636 | |||
1633 | for_each_cpu(cpu, this_rq->rd->rto_mask) { | 1637 | for_each_cpu(cpu, this_rq->rd->rto_mask) { |
1634 | if (this_cpu == cpu) | 1638 | if (this_cpu == cpu) |
1635 | continue; | 1639 | continue; |
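
The smp_wmb()/smp_rmb() pair is the usual publish/observe pattern: the overload side updates rto_mask before bumping rto_count, the pull side checks rto_count before walking rto_mask, and the barriers keep those two accesses ordered on each side. A loose userspace analogue with C11 fences (deliberately stronger than the kernel macros; the variable names are only stand-ins):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rto_mask;    /* stands in for the overloaded-CPU mask */
static atomic_int rto_count;   /* stands in for rq->rd->rto_count       */

static void *overload_side(void *arg)
{
    (void)arg;
    atomic_store_explicit(&rto_mask, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);   /* ~ smp_wmb() */
    atomic_store_explicit(&rto_count, 1, memory_order_relaxed);
    return NULL;
}

static void *pull_side(void *arg)
{
    (void)arg;
    if (atomic_load_explicit(&rto_count, memory_order_relaxed)) {
        atomic_thread_fence(memory_order_acquire);   /* ~ smp_rmb() */
        /* Seeing the count implies we also see the mask update. */
        printf("mask=%d\n", atomic_load_explicit(&rto_mask, memory_order_relaxed));
    }
    return NULL;
}

int main(void)
{
    pthread_t w, r;

    pthread_create(&w, NULL, overload_side, NULL);
    pthread_create(&r, NULL, pull_side, NULL);
    pthread_join(w, NULL);
    pthread_join(r, NULL);
    return 0;
}
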
@@ -1931,8 +1935,8 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | |||
1931 | p->rt.time_slice = sched_rr_timeslice; | 1935 | p->rt.time_slice = sched_rr_timeslice; |
1932 | 1936 | ||
1933 | /* | 1937 | /* |
1934 | * Requeue to the end of queue if we (and all of our ancestors) are the | 1938 | * Requeue to the end of queue if we (and all of our ancestors) are not |
1935 | * only element on the queue | 1939 | * the only element on the queue |
1936 | */ | 1940 | */ |
1937 | for_each_sched_rt_entity(rt_se) { | 1941 | for_each_sched_rt_entity(rt_se) { |
1938 | if (rt_se->run_list.prev != rt_se->run_list.next) { | 1942 | if (rt_se->run_list.prev != rt_se->run_list.next) { |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b3c5653e1dca..4e650acffed7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/stop_machine.h> | 7 | #include <linux/stop_machine.h> |
8 | #include <linux/tick.h> | 8 | #include <linux/tick.h> |
9 | #include <linux/slab.h> | ||
9 | 10 | ||
10 | #include "cpupri.h" | 11 | #include "cpupri.h" |
11 | #include "cpuacct.h" | 12 | #include "cpuacct.h" |
@@ -408,6 +409,10 @@ struct rq { | |||
408 | * remote CPUs use both these fields when doing load calculation. | 409 | * remote CPUs use both these fields when doing load calculation. |
409 | */ | 410 | */ |
410 | unsigned int nr_running; | 411 | unsigned int nr_running; |
412 | #ifdef CONFIG_NUMA_BALANCING | ||
413 | unsigned int nr_numa_running; | ||
414 | unsigned int nr_preferred_running; | ||
415 | #endif | ||
411 | #define CPU_LOAD_IDX_MAX 5 | 416 | #define CPU_LOAD_IDX_MAX 5 |
412 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; | 417 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
413 | unsigned long last_load_update_tick; | 418 | unsigned long last_load_update_tick; |
@@ -476,6 +481,9 @@ struct rq { | |||
476 | u64 age_stamp; | 481 | u64 age_stamp; |
477 | u64 idle_stamp; | 482 | u64 idle_stamp; |
478 | u64 avg_idle; | 483 | u64 avg_idle; |
484 | |||
485 | /* This is used to determine avg_idle's max value */ | ||
486 | u64 max_idle_balance_cost; | ||
479 | #endif | 487 | #endif |
480 | 488 | ||
481 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | 489 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
@@ -552,6 +560,12 @@ static inline u64 rq_clock_task(struct rq *rq) | |||
552 | return rq->clock_task; | 560 | return rq->clock_task; |
553 | } | 561 | } |
554 | 562 | ||
563 | #ifdef CONFIG_NUMA_BALANCING | ||
564 | extern void sched_setnuma(struct task_struct *p, int node); | ||
565 | extern int migrate_task_to(struct task_struct *p, int cpu); | ||
566 | extern int migrate_swap(struct task_struct *, struct task_struct *); | ||
567 | #endif /* CONFIG_NUMA_BALANCING */ | ||
568 | |||
555 | #ifdef CONFIG_SMP | 569 | #ifdef CONFIG_SMP |
556 | 570 | ||
557 | #define rcu_dereference_check_sched_domain(p) \ | 571 | #define rcu_dereference_check_sched_domain(p) \ |
@@ -593,9 +607,22 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) | |||
593 | return hsd; | 607 | return hsd; |
594 | } | 608 | } |
595 | 609 | ||
610 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) | ||
611 | { | ||
612 | struct sched_domain *sd; | ||
613 | |||
614 | for_each_domain(cpu, sd) { | ||
615 | if (sd->flags & flag) | ||
616 | break; | ||
617 | } | ||
618 | |||
619 | return sd; | ||
620 | } | ||
621 | |||
596 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); | 622 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); |
597 | DECLARE_PER_CPU(int, sd_llc_size); | 623 | DECLARE_PER_CPU(int, sd_llc_size); |
598 | DECLARE_PER_CPU(int, sd_llc_id); | 624 | DECLARE_PER_CPU(int, sd_llc_id); |
625 | DECLARE_PER_CPU(struct sched_domain *, sd_numa); | ||
599 | 626 | ||
600 | struct sched_group_power { | 627 | struct sched_group_power { |
601 | atomic_t ref; | 628 | atomic_t ref; |
@@ -605,6 +632,7 @@ struct sched_group_power { | |||
605 | */ | 632 | */ |
606 | unsigned int power, power_orig; | 633 | unsigned int power, power_orig; |
607 | unsigned long next_update; | 634 | unsigned long next_update; |
635 | int imbalance; /* XXX unrelated to power but shared group state */ | ||
608 | /* | 636 | /* |
609 | * Number of busy cpus in this group. | 637 | * Number of busy cpus in this group. |
610 | */ | 638 | */ |
@@ -719,6 +747,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
719 | */ | 747 | */ |
720 | smp_wmb(); | 748 | smp_wmb(); |
721 | task_thread_info(p)->cpu = cpu; | 749 | task_thread_info(p)->cpu = cpu; |
750 | p->wake_cpu = cpu; | ||
722 | #endif | 751 | #endif |
723 | } | 752 | } |
724 | 753 | ||
@@ -974,7 +1003,7 @@ struct sched_class { | |||
974 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1003 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
975 | 1004 | ||
976 | #ifdef CONFIG_SMP | 1005 | #ifdef CONFIG_SMP |
977 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); | 1006 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
978 | void (*migrate_task_rq)(struct task_struct *p, int next_cpu); | 1007 | void (*migrate_task_rq)(struct task_struct *p, int next_cpu); |
979 | 1008 | ||
980 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1009 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
@@ -1220,6 +1249,24 @@ static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | |||
1220 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | 1249 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
1221 | } | 1250 | } |
1222 | 1251 | ||
1252 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) | ||
1253 | { | ||
1254 | if (l1 > l2) | ||
1255 | swap(l1, l2); | ||
1256 | |||
1257 | spin_lock(l1); | ||
1258 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); | ||
1259 | } | ||
1260 | |||
1261 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) | ||
1262 | { | ||
1263 | if (l1 > l2) | ||
1264 | swap(l1, l2); | ||
1265 | |||
1266 | raw_spin_lock(l1); | ||
1267 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); | ||
1268 | } | ||
1269 | |||
1223 | /* | 1270 | /* |
1224 | * double_rq_lock - safely lock two runqueues | 1271 | * double_rq_lock - safely lock two runqueues |
1225 | * | 1272 | * |
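
double_lock()/double_raw_lock() avoid ABBA deadlock by always taking the lower-addressed lock first, so two callers can pass the same pair in either order and still agree on the locking order. A small pthread sketch of the same idea (lockdep's *_nested annotation has no userspace counterpart and is omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

/* Lock two mutexes in a globally consistent (address) order. */
static void double_lock(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
    if (l1 > l2) {
        pthread_mutex_t *tmp = l1;
        l1 = l2;
        l2 = tmp;
    }
    pthread_mutex_lock(l1);
    pthread_mutex_lock(l2);
}

static void *worker(void *arg)
{
    pthread_mutex_t **p = arg;   /* one thread gets (a, b), the other (b, a) */

    double_lock(p[0], p[1]);
    puts("got both locks");
    pthread_mutex_unlock(p[0]);
    pthread_mutex_unlock(p[1]);
    return NULL;
}

int main(void)
{
    pthread_mutex_t *ab[] = { &a, &b }, *ba[] = { &b, &a };
    pthread_t t1, t2;

    pthread_create(&t1, NULL, worker, ab);
    pthread_create(&t2, NULL, worker, ba);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
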
@@ -1305,7 +1352,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu); | |||
1305 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); | 1352 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
1306 | extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); | 1353 | extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); |
1307 | 1354 | ||
1308 | extern void account_cfs_bandwidth_used(int enabled, int was_enabled); | 1355 | extern void cfs_bandwidth_usage_inc(void); |
1356 | extern void cfs_bandwidth_usage_dec(void); | ||
1309 | 1357 | ||
1310 | #ifdef CONFIG_NO_HZ_COMMON | 1358 | #ifdef CONFIG_NO_HZ_COMMON |
1311 | enum rq_nohz_flag_bits { | 1359 | enum rq_nohz_flag_bits { |
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index c7edee71bce8..4ab704339656 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h | |||
@@ -59,9 +59,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t) | |||
59 | * from dequeue_task() to account for possible rq->clock skew across cpus. The | 59 | * from dequeue_task() to account for possible rq->clock skew across cpus. The |
60 | * delta taken on each cpu would annul the skew. | 60 | * delta taken on each cpu would annul the skew. |
61 | */ | 61 | */ |
62 | static inline void sched_info_dequeued(struct task_struct *t) | 62 | static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) |
63 | { | 63 | { |
64 | unsigned long long now = rq_clock(task_rq(t)), delta = 0; | 64 | unsigned long long now = rq_clock(rq), delta = 0; |
65 | 65 | ||
66 | if (unlikely(sched_info_on())) | 66 | if (unlikely(sched_info_on())) |
67 | if (t->sched_info.last_queued) | 67 | if (t->sched_info.last_queued) |
@@ -69,7 +69,7 @@ static inline void sched_info_dequeued(struct task_struct *t) | |||
69 | sched_info_reset_dequeued(t); | 69 | sched_info_reset_dequeued(t); |
70 | t->sched_info.run_delay += delta; | 70 | t->sched_info.run_delay += delta; |
71 | 71 | ||
72 | rq_sched_info_dequeued(task_rq(t), delta); | 72 | rq_sched_info_dequeued(rq, delta); |
73 | } | 73 | } |
74 | 74 | ||
75 | /* | 75 | /* |
@@ -77,9 +77,9 @@ static inline void sched_info_dequeued(struct task_struct *t) | |||
77 | * long it was waiting to run. We also note when it began so that we | 77 | * long it was waiting to run. We also note when it began so that we |
78 | * can keep stats on how long its timeslice is. | 78 | * can keep stats on how long its timeslice is. |
79 | */ | 79 | */ |
80 | static void sched_info_arrive(struct task_struct *t) | 80 | static void sched_info_arrive(struct rq *rq, struct task_struct *t) |
81 | { | 81 | { |
82 | unsigned long long now = rq_clock(task_rq(t)), delta = 0; | 82 | unsigned long long now = rq_clock(rq), delta = 0; |
83 | 83 | ||
84 | if (t->sched_info.last_queued) | 84 | if (t->sched_info.last_queued) |
85 | delta = now - t->sched_info.last_queued; | 85 | delta = now - t->sched_info.last_queued; |
@@ -88,7 +88,7 @@ static void sched_info_arrive(struct task_struct *t) | |||
88 | t->sched_info.last_arrival = now; | 88 | t->sched_info.last_arrival = now; |
89 | t->sched_info.pcount++; | 89 | t->sched_info.pcount++; |
90 | 90 | ||
91 | rq_sched_info_arrive(task_rq(t), delta); | 91 | rq_sched_info_arrive(rq, delta); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* | 94 | /* |
@@ -96,11 +96,11 @@ static void sched_info_arrive(struct task_struct *t) | |||
96 | * the timestamp if it is already not set. It's assumed that | 96 | * the timestamp if it is already not set. It's assumed that |
97 | * sched_info_dequeued() will clear that stamp when appropriate. | 97 | * sched_info_dequeued() will clear that stamp when appropriate. |
98 | */ | 98 | */ |
99 | static inline void sched_info_queued(struct task_struct *t) | 99 | static inline void sched_info_queued(struct rq *rq, struct task_struct *t) |
100 | { | 100 | { |
101 | if (unlikely(sched_info_on())) | 101 | if (unlikely(sched_info_on())) |
102 | if (!t->sched_info.last_queued) | 102 | if (!t->sched_info.last_queued) |
103 | t->sched_info.last_queued = rq_clock(task_rq(t)); | 103 | t->sched_info.last_queued = rq_clock(rq); |
104 | } | 104 | } |
105 | 105 | ||
106 | /* | 106 | /* |
@@ -111,15 +111,15 @@ static inline void sched_info_queued(struct task_struct *t) | |||
111 | * sched_info_queued() to mark that it has now again started waiting on | 111 | * sched_info_queued() to mark that it has now again started waiting on |
112 | * the runqueue. | 112 | * the runqueue. |
113 | */ | 113 | */ |
114 | static inline void sched_info_depart(struct task_struct *t) | 114 | static inline void sched_info_depart(struct rq *rq, struct task_struct *t) |
115 | { | 115 | { |
116 | unsigned long long delta = rq_clock(task_rq(t)) - | 116 | unsigned long long delta = rq_clock(rq) - |
117 | t->sched_info.last_arrival; | 117 | t->sched_info.last_arrival; |
118 | 118 | ||
119 | rq_sched_info_depart(task_rq(t), delta); | 119 | rq_sched_info_depart(rq, delta); |
120 | 120 | ||
121 | if (t->state == TASK_RUNNING) | 121 | if (t->state == TASK_RUNNING) |
122 | sched_info_queued(t); | 122 | sched_info_queued(rq, t); |
123 | } | 123 | } |
124 | 124 | ||
125 | /* | 125 | /* |
@@ -128,32 +128,34 @@ static inline void sched_info_depart(struct task_struct *t) | |||
128 | * the idle task.) We are only called when prev != next. | 128 | * the idle task.) We are only called when prev != next. |
129 | */ | 129 | */ |
130 | static inline void | 130 | static inline void |
131 | __sched_info_switch(struct task_struct *prev, struct task_struct *next) | 131 | __sched_info_switch(struct rq *rq, |
132 | struct task_struct *prev, struct task_struct *next) | ||
132 | { | 133 | { |
133 | struct rq *rq = task_rq(prev); | ||
134 | |||
135 | /* | 134 | /* |
136 | * prev now departs the cpu. It's not interesting to record | 135 | * prev now departs the cpu. It's not interesting to record |
137 | * stats about how efficient we were at scheduling the idle | 136 | * stats about how efficient we were at scheduling the idle |
138 | * process, however. | 137 | * process, however. |
139 | */ | 138 | */ |
140 | if (prev != rq->idle) | 139 | if (prev != rq->idle) |
141 | sched_info_depart(prev); | 140 | sched_info_depart(rq, prev); |
142 | 141 | ||
143 | if (next != rq->idle) | 142 | if (next != rq->idle) |
144 | sched_info_arrive(next); | 143 | sched_info_arrive(rq, next); |
145 | } | 144 | } |
146 | static inline void | 145 | static inline void |
147 | sched_info_switch(struct task_struct *prev, struct task_struct *next) | 146 | sched_info_switch(struct rq *rq, |
147 | struct task_struct *prev, struct task_struct *next) | ||
148 | { | 148 | { |
149 | if (unlikely(sched_info_on())) | 149 | if (unlikely(sched_info_on())) |
150 | __sched_info_switch(prev, next); | 150 | __sched_info_switch(rq, prev, next); |
151 | } | 151 | } |
152 | #else | 152 | #else |
153 | #define sched_info_queued(t) do { } while (0) | 153 | #define sched_info_queued(rq, t) do { } while (0) |
154 | #define sched_info_reset_dequeued(t) do { } while (0) | 154 | #define sched_info_reset_dequeued(t) do { } while (0) |
155 | #define sched_info_dequeued(t) do { } while (0) | 155 | #define sched_info_dequeued(rq, t) do { } while (0) |
156 | #define sched_info_switch(t, next) do { } while (0) | 156 | #define sched_info_depart(rq, t) do { } while (0) |
157 | #define sched_info_arrive(rq, next) do { } while (0) | ||
158 | #define sched_info_switch(rq, t, next) do { } while (0) | ||
157 | #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ | 159 | #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ |
158 | 160 | ||
159 | /* | 161 | /* |
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index e08fbeeb54b9..47197de8abd9 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #ifdef CONFIG_SMP | 12 | #ifdef CONFIG_SMP |
13 | static int | 13 | static int |
14 | select_task_rq_stop(struct task_struct *p, int sd_flag, int flags) | 14 | select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags) |
15 | { | 15 | { |
16 | return task_cpu(p); /* stop tasks never migrate */ | 16 | return task_cpu(p); /* stop tasks never migrate */ |
17 | } | 17 | } |
diff --git a/kernel/softirq.c b/kernel/softirq.c index d7d498d8cc4f..dcab1d3fb53d 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -100,13 +100,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt) | |||
100 | 100 | ||
101 | raw_local_irq_save(flags); | 101 | raw_local_irq_save(flags); |
102 | /* | 102 | /* |
103 | * The preempt tracer hooks into add_preempt_count and will break | 103 | * The preempt tracer hooks into preempt_count_add and will break |
104 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET | 104 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET |
105 | * is set and before current->softirq_enabled is cleared. | 105 | * is set and before current->softirq_enabled is cleared. |
106 | * We must manually increment preempt_count here and manually | 106 | * We must manually increment preempt_count here and manually |
107 | * call the trace_preempt_off later. | 107 | * call the trace_preempt_off later. |
108 | */ | 108 | */ |
109 | preempt_count() += cnt; | 109 | __preempt_count_add(cnt); |
110 | /* | 110 | /* |
111 | * Were softirqs turned off above: | 111 | * Were softirqs turned off above: |
112 | */ | 112 | */ |
@@ -120,7 +120,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt) | |||
120 | #else /* !CONFIG_TRACE_IRQFLAGS */ | 120 | #else /* !CONFIG_TRACE_IRQFLAGS */ |
121 | static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) | 121 | static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) |
122 | { | 122 | { |
123 | add_preempt_count(cnt); | 123 | preempt_count_add(cnt); |
124 | barrier(); | 124 | barrier(); |
125 | } | 125 | } |
126 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 126 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
@@ -139,7 +139,7 @@ static void __local_bh_enable(unsigned int cnt) | |||
139 | 139 | ||
140 | if (softirq_count() == cnt) | 140 | if (softirq_count() == cnt) |
141 | trace_softirqs_on(_RET_IP_); | 141 | trace_softirqs_on(_RET_IP_); |
142 | sub_preempt_count(cnt); | 142 | preempt_count_sub(cnt); |
143 | } | 143 | } |
144 | 144 | ||
145 | /* | 145 | /* |
@@ -169,12 +169,12 @@ static inline void _local_bh_enable_ip(unsigned long ip) | |||
169 | * Keep preemption disabled until we are done with | 169 | * Keep preemption disabled until we are done with |
170 | * softirq processing: | 170 | * softirq processing: |
171 | */ | 171 | */ |
172 | sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); | 172 | preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1); |
173 | 173 | ||
174 | if (unlikely(!in_interrupt() && local_softirq_pending())) | 174 | if (unlikely(!in_interrupt() && local_softirq_pending())) |
175 | do_softirq(); | 175 | do_softirq(); |
176 | 176 | ||
177 | dec_preempt_count(); | 177 | preempt_count_dec(); |
178 | #ifdef CONFIG_TRACE_IRQFLAGS | 178 | #ifdef CONFIG_TRACE_IRQFLAGS |
179 | local_irq_enable(); | 179 | local_irq_enable(); |
180 | #endif | 180 | #endif |
@@ -256,7 +256,7 @@ restart: | |||
256 | " exited with %08x?\n", vec_nr, | 256 | " exited with %08x?\n", vec_nr, |
257 | softirq_to_name[vec_nr], h->action, | 257 | softirq_to_name[vec_nr], h->action, |
258 | prev_count, preempt_count()); | 258 | prev_count, preempt_count()); |
259 | preempt_count() = prev_count; | 259 | preempt_count_set(prev_count); |
260 | } | 260 | } |
261 | 261 | ||
262 | rcu_bh_qs(cpu); | 262 | rcu_bh_qs(cpu); |
@@ -369,7 +369,7 @@ void irq_exit(void) | |||
369 | 369 | ||
370 | account_irq_exit_time(current); | 370 | account_irq_exit_time(current); |
371 | trace_hardirq_exit(); | 371 | trace_hardirq_exit(); |
372 | sub_preempt_count(HARDIRQ_OFFSET); | 372 | preempt_count_sub(HARDIRQ_OFFSET); |
373 | if (!in_interrupt() && local_softirq_pending()) | 373 | if (!in_interrupt() && local_softirq_pending()) |
374 | invoke_softirq(); | 374 | invoke_softirq(); |
375 | 375 | ||
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index c09f2955ae30..c530bc5be7cf 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -115,6 +115,182 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) | |||
115 | return done.executed ? done.ret : -ENOENT; | 115 | return done.executed ? done.ret : -ENOENT; |
116 | } | 116 | } |
117 | 117 | ||
118 | /* This controls the threads on each CPU. */ | ||
119 | enum multi_stop_state { | ||
120 | /* Dummy starting state for thread. */ | ||
121 | MULTI_STOP_NONE, | ||
122 | /* Awaiting everyone to be scheduled. */ | ||
123 | MULTI_STOP_PREPARE, | ||
124 | /* Disable interrupts. */ | ||
125 | MULTI_STOP_DISABLE_IRQ, | ||
126 | /* Run the function */ | ||
127 | MULTI_STOP_RUN, | ||
128 | /* Exit */ | ||
129 | MULTI_STOP_EXIT, | ||
130 | }; | ||
131 | |||
132 | struct multi_stop_data { | ||
133 | int (*fn)(void *); | ||
134 | void *data; | ||
135 | /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ | ||
136 | unsigned int num_threads; | ||
137 | const struct cpumask *active_cpus; | ||
138 | |||
139 | enum multi_stop_state state; | ||
140 | atomic_t thread_ack; | ||
141 | }; | ||
142 | |||
143 | static void set_state(struct multi_stop_data *msdata, | ||
144 | enum multi_stop_state newstate) | ||
145 | { | ||
146 | /* Reset ack counter. */ | ||
147 | atomic_set(&msdata->thread_ack, msdata->num_threads); | ||
148 | smp_wmb(); | ||
149 | msdata->state = newstate; | ||
150 | } | ||
151 | |||
152 | /* Last one to ack a state moves to the next state. */ | ||
153 | static void ack_state(struct multi_stop_data *msdata) | ||
154 | { | ||
155 | if (atomic_dec_and_test(&msdata->thread_ack)) | ||
156 | set_state(msdata, msdata->state + 1); | ||
157 | } | ||
158 | |||
159 | /* This is the cpu_stop function which stops the CPU. */ | ||
160 | static int multi_cpu_stop(void *data) | ||
161 | { | ||
162 | struct multi_stop_data *msdata = data; | ||
163 | enum multi_stop_state curstate = MULTI_STOP_NONE; | ||
164 | int cpu = smp_processor_id(), err = 0; | ||
165 | unsigned long flags; | ||
166 | bool is_active; | ||
167 | |||
168 | /* | ||
169 | * When called from stop_machine_from_inactive_cpu(), irq might | ||
170 | * already be disabled. Save the state and restore it on exit. | ||
171 | */ | ||
172 | local_save_flags(flags); | ||
173 | |||
174 | if (!msdata->active_cpus) | ||
175 | is_active = cpu == cpumask_first(cpu_online_mask); | ||
176 | else | ||
177 | is_active = cpumask_test_cpu(cpu, msdata->active_cpus); | ||
178 | |||
179 | /* Simple state machine */ | ||
180 | do { | ||
181 | /* Chill out and ensure we re-read multi_stop_state. */ | ||
182 | cpu_relax(); | ||
183 | if (msdata->state != curstate) { | ||
184 | curstate = msdata->state; | ||
185 | switch (curstate) { | ||
186 | case MULTI_STOP_DISABLE_IRQ: | ||
187 | local_irq_disable(); | ||
188 | hard_irq_disable(); | ||
189 | break; | ||
190 | case MULTI_STOP_RUN: | ||
191 | if (is_active) | ||
192 | err = msdata->fn(msdata->data); | ||
193 | break; | ||
194 | default: | ||
195 | break; | ||
196 | } | ||
197 | ack_state(msdata); | ||
198 | } | ||
199 | } while (curstate != MULTI_STOP_EXIT); | ||
200 | |||
201 | local_irq_restore(flags); | ||
202 | return err; | ||
203 | } | ||
204 | |||
205 | struct irq_cpu_stop_queue_work_info { | ||
206 | int cpu1; | ||
207 | int cpu2; | ||
208 | struct cpu_stop_work *work1; | ||
209 | struct cpu_stop_work *work2; | ||
210 | }; | ||
211 | |||
212 | /* | ||
213 | * This function is always run with irqs and preemption disabled. | ||
214 | * This guarantees that both work1 and work2 get queued, before | ||
215 | * our local migrate thread gets the chance to preempt us. | ||
216 | */ | ||
217 | static void irq_cpu_stop_queue_work(void *arg) | ||
218 | { | ||
219 | struct irq_cpu_stop_queue_work_info *info = arg; | ||
220 | cpu_stop_queue_work(info->cpu1, info->work1); | ||
221 | cpu_stop_queue_work(info->cpu2, info->work2); | ||
222 | } | ||
223 | |||
224 | /** | ||
225 | * stop_two_cpus - stops two cpus | ||
226 | * @cpu1: the cpu to stop | ||
227 | * @cpu2: the other cpu to stop | ||
228 | * @fn: function to execute | ||
229 | * @arg: argument to @fn | ||
230 | * | ||
231 | * Stops both specified CPUs and runs @fn on one of them. | ||
232 | * | ||
233 | * returns when both are completed. | ||
234 | */ | ||
235 | int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) | ||
236 | { | ||
237 | struct cpu_stop_done done; | ||
238 | struct cpu_stop_work work1, work2; | ||
239 | struct irq_cpu_stop_queue_work_info call_args; | ||
240 | struct multi_stop_data msdata; | ||
241 | |||
242 | preempt_disable(); | ||
243 | msdata = (struct multi_stop_data){ | ||
244 | .fn = fn, | ||
245 | .data = arg, | ||
246 | .num_threads = 2, | ||
247 | .active_cpus = cpumask_of(cpu1), | ||
248 | }; | ||
249 | |||
250 | work1 = work2 = (struct cpu_stop_work){ | ||
251 | .fn = multi_cpu_stop, | ||
252 | .arg = &msdata, | ||
253 | .done = &done | ||
254 | }; | ||
255 | |||
256 | call_args = (struct irq_cpu_stop_queue_work_info){ | ||
257 | .cpu1 = cpu1, | ||
258 | .cpu2 = cpu2, | ||
259 | .work1 = &work1, | ||
260 | .work2 = &work2, | ||
261 | }; | ||
262 | |||
263 | cpu_stop_init_done(&done, 2); | ||
264 | set_state(&msdata, MULTI_STOP_PREPARE); | ||
265 | |||
266 | /* | ||
267 | * If we observe both CPUs active we know _cpu_down() cannot yet have | ||
268 | * queued its stop_machine works and therefore ours will get executed | ||
270 | * first. Or it's not either one of our CPUs that's getting unplugged, | ||
270 | * in which case we don't care. | ||
271 | * | ||
272 | * This relies on the stopper workqueues to be FIFO. | ||
273 | */ | ||
274 | if (!cpu_active(cpu1) || !cpu_active(cpu2)) { | ||
275 | preempt_enable(); | ||
276 | return -ENOENT; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Queuing needs to be done by the lowest numbered CPU, to ensure | ||
281 | * that works are always queued in the same order on every CPU. | ||
282 | * This prevents deadlocks. | ||
283 | */ | ||
284 | smp_call_function_single(min(cpu1, cpu2), | ||
285 | &irq_cpu_stop_queue_work, | ||
286 | &call_args, 0); | ||
287 | preempt_enable(); | ||
288 | |||
289 | wait_for_completion(&done.completion); | ||
290 | |||
291 | return done.executed ? done.ret : -ENOENT; | ||
292 | } | ||
293 | |||
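
multi_cpu_stop() is essentially a lock-free barrier: every stopper thread spins on msdata->state, the last one to ack a state advances it, and only the active CPUs run the callback during MULTI_STOP_RUN. The userspace analogue below walks the same PREPARE -> DISABLE_IRQ -> RUN -> EXIT sequence with threads standing in for CPUs; interrupt disabling has no equivalent here, so that state is simply acknowledged, and thread 0 plays the "active" role.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { STOP_NONE, STOP_PREPARE, STOP_DISABLE_IRQ, STOP_RUN, STOP_EXIT };

#define NTHREADS 4

static _Atomic int state = STOP_NONE;
static _Atomic int thread_ack;

static void set_state(int newstate)
{
    atomic_store(&thread_ack, NTHREADS);   /* reset the ack counter first */
    atomic_store(&state, newstate);
}

static void ack_state(void)
{
    if (atomic_fetch_sub(&thread_ack, 1) == 1)   /* last one to ack ...    */
        set_state(atomic_load(&state) + 1);      /* ... advances the state */
}

static void *cpu_thread(void *arg)
{
    long id = (long)arg;
    int curstate = STOP_NONE;

    do {
        int s = atomic_load(&state);

        if (s != curstate) {
            curstate = s;
            if (curstate == STOP_RUN && id == 0)   /* only the "active" cpu */
                printf("thread %ld runs fn()\n", id);
            ack_state();
        }
    } while (curstate != STOP_EXIT);

    return NULL;
}

int main(void)
{
    pthread_t t[NTHREADS];
    long i;

    set_state(STOP_PREPARE);   /* as stop_two_cpus()/__stop_machine() do */
    for (i = 0; i < NTHREADS; i++)
        pthread_create(&t[i], NULL, cpu_thread, (void *)i);
    for (i = 0; i < NTHREADS; i++)
        pthread_join(t[i], NULL);
    return 0;
}
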
118 | /** | 294 | /** |
119 | * stop_one_cpu_nowait - stop a cpu but don't wait for completion | 295 | * stop_one_cpu_nowait - stop a cpu but don't wait for completion |
120 | * @cpu: cpu to stop | 296 | * @cpu: cpu to stop |
@@ -359,98 +535,14 @@ early_initcall(cpu_stop_init); | |||
359 | 535 | ||
360 | #ifdef CONFIG_STOP_MACHINE | 536 | #ifdef CONFIG_STOP_MACHINE |
361 | 537 | ||
362 | /* This controls the threads on each CPU. */ | ||
363 | enum stopmachine_state { | ||
364 | /* Dummy starting state for thread. */ | ||
365 | STOPMACHINE_NONE, | ||
366 | /* Awaiting everyone to be scheduled. */ | ||
367 | STOPMACHINE_PREPARE, | ||
368 | /* Disable interrupts. */ | ||
369 | STOPMACHINE_DISABLE_IRQ, | ||
370 | /* Run the function */ | ||
371 | STOPMACHINE_RUN, | ||
372 | /* Exit */ | ||
373 | STOPMACHINE_EXIT, | ||
374 | }; | ||
375 | |||
376 | struct stop_machine_data { | ||
377 | int (*fn)(void *); | ||
378 | void *data; | ||
379 | /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ | ||
380 | unsigned int num_threads; | ||
381 | const struct cpumask *active_cpus; | ||
382 | |||
383 | enum stopmachine_state state; | ||
384 | atomic_t thread_ack; | ||
385 | }; | ||
386 | |||
387 | static void set_state(struct stop_machine_data *smdata, | ||
388 | enum stopmachine_state newstate) | ||
389 | { | ||
390 | /* Reset ack counter. */ | ||
391 | atomic_set(&smdata->thread_ack, smdata->num_threads); | ||
392 | smp_wmb(); | ||
393 | smdata->state = newstate; | ||
394 | } | ||
395 | |||
396 | /* Last one to ack a state moves to the next state. */ | ||
397 | static void ack_state(struct stop_machine_data *smdata) | ||
398 | { | ||
399 | if (atomic_dec_and_test(&smdata->thread_ack)) | ||
400 | set_state(smdata, smdata->state + 1); | ||
401 | } | ||
402 | |||
403 | /* This is the cpu_stop function which stops the CPU. */ | ||
404 | static int stop_machine_cpu_stop(void *data) | ||
405 | { | ||
406 | struct stop_machine_data *smdata = data; | ||
407 | enum stopmachine_state curstate = STOPMACHINE_NONE; | ||
408 | int cpu = smp_processor_id(), err = 0; | ||
409 | unsigned long flags; | ||
410 | bool is_active; | ||
411 | |||
412 | /* | ||
413 | * When called from stop_machine_from_inactive_cpu(), irq might | ||
414 | * already be disabled. Save the state and restore it on exit. | ||
415 | */ | ||
416 | local_save_flags(flags); | ||
417 | |||
418 | if (!smdata->active_cpus) | ||
419 | is_active = cpu == cpumask_first(cpu_online_mask); | ||
420 | else | ||
421 | is_active = cpumask_test_cpu(cpu, smdata->active_cpus); | ||
422 | |||
423 | /* Simple state machine */ | ||
424 | do { | ||
425 | /* Chill out and ensure we re-read stopmachine_state. */ | ||
426 | cpu_relax(); | ||
427 | if (smdata->state != curstate) { | ||
428 | curstate = smdata->state; | ||
429 | switch (curstate) { | ||
430 | case STOPMACHINE_DISABLE_IRQ: | ||
431 | local_irq_disable(); | ||
432 | hard_irq_disable(); | ||
433 | break; | ||
434 | case STOPMACHINE_RUN: | ||
435 | if (is_active) | ||
436 | err = smdata->fn(smdata->data); | ||
437 | break; | ||
438 | default: | ||
439 | break; | ||
440 | } | ||
441 | ack_state(smdata); | ||
442 | } | ||
443 | } while (curstate != STOPMACHINE_EXIT); | ||
444 | |||
445 | local_irq_restore(flags); | ||
446 | return err; | ||
447 | } | ||
448 | |||
449 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | 538 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) |
450 | { | 539 | { |
451 | struct stop_machine_data smdata = { .fn = fn, .data = data, | 540 | struct multi_stop_data msdata = { |
452 | .num_threads = num_online_cpus(), | 541 | .fn = fn, |
453 | .active_cpus = cpus }; | 542 | .data = data, |
543 | .num_threads = num_online_cpus(), | ||
544 | .active_cpus = cpus, | ||
545 | }; | ||
454 | 546 | ||
455 | if (!stop_machine_initialized) { | 547 | if (!stop_machine_initialized) { |
456 | /* | 548 | /* |
@@ -461,7 +553,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | |||
461 | unsigned long flags; | 553 | unsigned long flags; |
462 | int ret; | 554 | int ret; |
463 | 555 | ||
464 | WARN_ON_ONCE(smdata.num_threads != 1); | 556 | WARN_ON_ONCE(msdata.num_threads != 1); |
465 | 557 | ||
466 | local_irq_save(flags); | 558 | local_irq_save(flags); |
467 | hard_irq_disable(); | 559 | hard_irq_disable(); |
@@ -472,8 +564,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | |||
472 | } | 564 | } |
473 | 565 | ||
474 | /* Set the initial state and stop all online cpus. */ | 566 | /* Set the initial state and stop all online cpus. */ |
475 | set_state(&smdata, STOPMACHINE_PREPARE); | 567 | set_state(&msdata, MULTI_STOP_PREPARE); |
476 | return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata); | 568 | return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata); |
477 | } | 569 | } |
478 | 570 | ||
479 | int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | 571 | int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) |
@@ -513,25 +605,25 @@ EXPORT_SYMBOL_GPL(stop_machine); | |||
513 | int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, | 605 | int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, |
514 | const struct cpumask *cpus) | 606 | const struct cpumask *cpus) |
515 | { | 607 | { |
516 | struct stop_machine_data smdata = { .fn = fn, .data = data, | 608 | struct multi_stop_data msdata = { .fn = fn, .data = data, |
517 | .active_cpus = cpus }; | 609 | .active_cpus = cpus }; |
518 | struct cpu_stop_done done; | 610 | struct cpu_stop_done done; |
519 | int ret; | 611 | int ret; |
520 | 612 | ||
521 | /* Local CPU must be inactive and CPU hotplug in progress. */ | 613 | /* Local CPU must be inactive and CPU hotplug in progress. */ |
522 | BUG_ON(cpu_active(raw_smp_processor_id())); | 614 | BUG_ON(cpu_active(raw_smp_processor_id())); |
523 | smdata.num_threads = num_active_cpus() + 1; /* +1 for local */ | 615 | msdata.num_threads = num_active_cpus() + 1; /* +1 for local */ |
524 | 616 | ||
525 | /* No proper task established and can't sleep - busy wait for lock. */ | 617 | /* No proper task established and can't sleep - busy wait for lock. */ |
526 | while (!mutex_trylock(&stop_cpus_mutex)) | 618 | while (!mutex_trylock(&stop_cpus_mutex)) |
527 | cpu_relax(); | 619 | cpu_relax(); |
528 | 620 | ||
529 | /* Schedule work on other CPUs and execute directly for local CPU */ | 621 | /* Schedule work on other CPUs and execute directly for local CPU */ |
530 | set_state(&smdata, STOPMACHINE_PREPARE); | 622 | set_state(&msdata, MULTI_STOP_PREPARE); |
531 | cpu_stop_init_done(&done, num_active_cpus()); | 623 | cpu_stop_init_done(&done, num_active_cpus()); |
532 | queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, | 624 | queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata, |
533 | &done); | 625 | &done); |
534 | ret = stop_machine_cpu_stop(&smdata); | 626 | ret = multi_cpu_stop(&msdata); |
535 | 627 | ||
536 | /* Busy wait for completion. */ | 628 | /* Busy wait for completion. */ |
537 | while (!completion_done(&done.completion)) | 629 | while (!completion_done(&done.completion)) |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b2f06f3c6a3f..a159e1fd2013 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -371,13 +371,6 @@ static struct ctl_table kern_table[] = { | |||
371 | .proc_handler = proc_dointvec, | 371 | .proc_handler = proc_dointvec, |
372 | }, | 372 | }, |
373 | { | 373 | { |
374 | .procname = "numa_balancing_scan_period_reset", | ||
375 | .data = &sysctl_numa_balancing_scan_period_reset, | ||
376 | .maxlen = sizeof(unsigned int), | ||
377 | .mode = 0644, | ||
378 | .proc_handler = proc_dointvec, | ||
379 | }, | ||
380 | { | ||
381 | .procname = "numa_balancing_scan_period_max_ms", | 374 | .procname = "numa_balancing_scan_period_max_ms", |
382 | .data = &sysctl_numa_balancing_scan_period_max, | 375 | .data = &sysctl_numa_balancing_scan_period_max, |
383 | .maxlen = sizeof(unsigned int), | 376 | .maxlen = sizeof(unsigned int), |
@@ -391,6 +384,20 @@ static struct ctl_table kern_table[] = { | |||
391 | .mode = 0644, | 384 | .mode = 0644, |
392 | .proc_handler = proc_dointvec, | 385 | .proc_handler = proc_dointvec, |
393 | }, | 386 | }, |
387 | { | ||
388 | .procname = "numa_balancing_settle_count", | ||
389 | .data = &sysctl_numa_balancing_settle_count, | ||
390 | .maxlen = sizeof(unsigned int), | ||
391 | .mode = 0644, | ||
392 | .proc_handler = proc_dointvec, | ||
393 | }, | ||
394 | { | ||
395 | .procname = "numa_balancing_migrate_deferred", | ||
396 | .data = &sysctl_numa_balancing_migrate_deferred, | ||
397 | .maxlen = sizeof(unsigned int), | ||
398 | .mode = 0644, | ||
399 | .proc_handler = proc_dointvec, | ||
400 | }, | ||
394 | #endif /* CONFIG_NUMA_BALANCING */ | 401 | #endif /* CONFIG_NUMA_BALANCING */ |
395 | #endif /* CONFIG_SCHED_DEBUG */ | 402 | #endif /* CONFIG_SCHED_DEBUG */ |
396 | { | 403 | { |
diff --git a/kernel/timer.c b/kernel/timer.c index 4296d13db3d1..6582b82fa966 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1092,7 +1092,7 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index) | |||
1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | 1092 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), |
1093 | unsigned long data) | 1093 | unsigned long data) |
1094 | { | 1094 | { |
1095 | int preempt_count = preempt_count(); | 1095 | int count = preempt_count(); |
1096 | 1096 | ||
1097 | #ifdef CONFIG_LOCKDEP | 1097 | #ifdef CONFIG_LOCKDEP |
1098 | /* | 1098 | /* |
@@ -1119,16 +1119,16 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | |||
1119 | 1119 | ||
1120 | lock_map_release(&lockdep_map); | 1120 | lock_map_release(&lockdep_map); |
1121 | 1121 | ||
1122 | if (preempt_count != preempt_count()) { | 1122 | if (count != preempt_count()) { |
1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", | 1123 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", |
1124 | fn, preempt_count, preempt_count()); | 1124 | fn, count, preempt_count()); |
1125 | /* | 1125 | /* |
1126 | * Restore the preempt count. That gives us a decent | 1126 | * Restore the preempt count. That gives us a decent |
1127 | * chance to survive and extract information. If the | 1127 | * chance to survive and extract information. If the |
1128 | * callback kept a lock held, bad luck, but not worse | 1128 | * callback kept a lock held, bad luck, but not worse |
1129 | * than the BUG() we had. | 1129 | * than the BUG() we had. |
1130 | */ | 1130 | */ |
1131 | preempt_count() = preempt_count; | 1131 | preempt_count_set(count); |
1132 | } | 1132 | } |
1133 | } | 1133 | } |
1134 | 1134 | ||
diff --git a/kernel/wait.c b/kernel/wait.c index d550920e040c..de21c6305a44 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -92,6 +92,30 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
92 | } | 92 | } |
93 | EXPORT_SYMBOL(prepare_to_wait_exclusive); | 93 | EXPORT_SYMBOL(prepare_to_wait_exclusive); |
94 | 94 | ||
95 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) | ||
96 | { | ||
97 | unsigned long flags; | ||
98 | |||
99 | if (signal_pending_state(state, current)) | ||
100 | return -ERESTARTSYS; | ||
101 | |||
102 | wait->private = current; | ||
103 | wait->func = autoremove_wake_function; | ||
104 | |||
105 | spin_lock_irqsave(&q->lock, flags); | ||
106 | if (list_empty(&wait->task_list)) { | ||
107 | if (wait->flags & WQ_FLAG_EXCLUSIVE) | ||
108 | __add_wait_queue_tail(q, wait); | ||
109 | else | ||
110 | __add_wait_queue(q, wait); | ||
111 | } | ||
112 | set_current_state(state); | ||
113 | spin_unlock_irqrestore(&q->lock, flags); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | EXPORT_SYMBOL(prepare_to_wait_event); | ||
118 | |||
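
prepare_to_wait_event() folds the signal check, the wait-queue insertion and the task-state change into one helper, which is exactly what a wait_event()-style loop needs on each iteration. Sketched kernel-style below, not as a compilable unit: wq and condition are placeholders for a caller's wait queue and wakeup condition.

    DEFINE_WAIT(wait);
    long ret = 0;

    for (;;) {
        long intr = prepare_to_wait_event(&wq, &wait, TASK_INTERRUPTIBLE);

        if (condition)
            break;          /* condition became true, possibly racing a wakeup */
        if (intr) {
            ret = intr;     /* -ERESTARTSYS: a signal is pending */
            break;
        }
        schedule();
    }
    finish_wait(&wq, &wait);
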
95 | /** | 119 | /** |
96 | * finish_wait - clean up after waiting in a queue | 120 | * finish_wait - clean up after waiting in a queue |
97 | * @q: waitqueue waited on | 121 | * @q: waitqueue waited on |