diff options
-rw-r--r-- | kernel/sched/core.c | 69 | ||||
-rw-r--r-- | kernel/time/tick-sched.c | 2 |
2 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bd7c39450b1b..2d8927fda712 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -6122,6 +6122,17 @@ static void sched_init_numa(void) | |||
6122 | * numbers. | 6122 | * numbers. |
6123 | */ | 6123 | */ |
6124 | 6124 | ||
6125 | /* | ||
6126 | * Here, we should temporarily reset sched_domains_numa_levels to 0. | ||
6127 | * If it fails to allocate memory for array sched_domains_numa_masks[][], | ||
6128 | * the array will contain less than 'level' members. This could be | ||
6129 | * dangerous when we use it to iterate array sched_domains_numa_masks[][] | ||
6130 | * in other functions. | ||
6131 | * | ||
6132 | * We reset it to 'level' at the end of this function. | ||
6133 | */ | ||
6134 | sched_domains_numa_levels = 0; | ||
6135 | |||
6125 | sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); | 6136 | sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); |
6126 | if (!sched_domains_numa_masks) | 6137 | if (!sched_domains_numa_masks) |
6127 | return; | 6138 | return; |
@@ -6176,11 +6187,68 @@ static void sched_init_numa(void) | |||
6176 | } | 6187 | } |
6177 | 6188 | ||
6178 | sched_domain_topology = tl; | 6189 | sched_domain_topology = tl; |
6190 | |||
6191 | sched_domains_numa_levels = level; | ||
6192 | } | ||
6193 | |||
6194 | static void sched_domains_numa_masks_set(int cpu) | ||
6195 | { | ||
6196 | int i, j; | ||
6197 | int node = cpu_to_node(cpu); | ||
6198 | |||
6199 | for (i = 0; i < sched_domains_numa_levels; i++) { | ||
6200 | for (j = 0; j < nr_node_ids; j++) { | ||
6201 | if (node_distance(j, node) <= sched_domains_numa_distance[i]) | ||
6202 | cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); | ||
6203 | } | ||
6204 | } | ||
6205 | } | ||
6206 | |||
6207 | static void sched_domains_numa_masks_clear(int cpu) | ||
6208 | { | ||
6209 | int i, j; | ||
6210 | for (i = 0; i < sched_domains_numa_levels; i++) { | ||
6211 | for (j = 0; j < nr_node_ids; j++) | ||
6212 | cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); | ||
6213 | } | ||
6214 | } | ||
6215 | |||
6216 | /* | ||
6217 | * Update sched_domains_numa_masks[level][node] array when new cpus | ||
6218 | * are onlined. | ||
6219 | */ | ||
6220 | static int sched_domains_numa_masks_update(struct notifier_block *nfb, | ||
6221 | unsigned long action, | ||
6222 | void *hcpu) | ||
6223 | { | ||
6224 | int cpu = (long)hcpu; | ||
6225 | |||
6226 | switch (action & ~CPU_TASKS_FROZEN) { | ||
6227 | case CPU_ONLINE: | ||
6228 | sched_domains_numa_masks_set(cpu); | ||
6229 | break; | ||
6230 | |||
6231 | case CPU_DEAD: | ||
6232 | sched_domains_numa_masks_clear(cpu); | ||
6233 | break; | ||
6234 | |||
6235 | default: | ||
6236 | return NOTIFY_DONE; | ||
6237 | } | ||
6238 | |||
6239 | return NOTIFY_OK; | ||
6179 | } | 6240 | } |
6180 | #else | 6241 | #else |
6181 | static inline void sched_init_numa(void) | 6242 | static inline void sched_init_numa(void) |
6182 | { | 6243 | { |
6183 | } | 6244 | } |
6245 | |||
6246 | static int sched_domains_numa_masks_update(struct notifier_block *nfb, | ||
6247 | unsigned long action, | ||
6248 | void *hcpu) | ||
6249 | { | ||
6250 | return 0; | ||
6251 | } | ||
6184 | #endif /* CONFIG_NUMA */ | 6252 | #endif /* CONFIG_NUMA */ |
6185 | 6253 | ||
6186 | static int __sdt_alloc(const struct cpumask *cpu_map) | 6254 | static int __sdt_alloc(const struct cpumask *cpu_map) |
@@ -6629,6 +6697,7 @@ void __init sched_init_smp(void) | |||
6629 | mutex_unlock(&sched_domains_mutex); | 6697 | mutex_unlock(&sched_domains_mutex); |
6630 | put_online_cpus(); | 6698 | put_online_cpus(); |
6631 | 6699 | ||
6700 | hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); | ||
6632 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); | 6701 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
6633 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); | 6702 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
6634 | 6703 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f423bdd035c2..a40260885265 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -835,7 +835,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
835 | */ | 835 | */ |
836 | if (ts->tick_stopped) { | 836 | if (ts->tick_stopped) { |
837 | touch_softlockup_watchdog(); | 837 | touch_softlockup_watchdog(); |
838 | if (idle_cpu(cpu)) | 838 | if (is_idle_task(current)) |
839 | ts->idle_jiffies++; | 839 | ts->idle_jiffies++; |
840 | } | 840 | } |
841 | update_process_times(user_mode(regs)); | 841 | update_process_times(user_mode(regs)); |