author     Ingo Molnar <mingo@kernel.org>  2015-03-23 05:50:29 -0400
committer  Ingo Molnar <mingo@kernel.org>  2015-03-23 05:50:29 -0400
commit     e1b63dec2ddba654c7ca75996284e453f32d1af7 (patch)
tree       c48fbfdb84b4e1b6b416b0e2ce7e14cd1350c5f5 /kernel/sched
parent     f8e617f4582995f7c25ef25b4167213120ad122b (diff)
parent     746db9443ea57fd9c059f62c4bfbf41cf224fe13 (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/Makefile |  2
-rw-r--r--  kernel/sched/clock.c  | 13
-rw-r--r--  kernel/sched/core.c   | 12
-rw-r--r--  kernel/sched/idle.c   | 58
-rw-r--r--  kernel/sched/stats.c  | 11
5 files changed, 65 insertions(+), 31 deletions(-)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index ab32b7b0db5c..46be87024875 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -1,5 +1,5 @@
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_clock.o = -pg
+CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
 endif
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c27e4f8f4879..c0a205101c23 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -420,3 +420,16 @@ u64 local_clock(void)
 
 EXPORT_SYMBOL_GPL(cpu_clock);
 EXPORT_SYMBOL_GPL(local_clock);
+
+/*
+ * Running clock - returns the time that has elapsed while a guest has been
+ * running.
+ * On a guest this value should be local_clock minus the time the guest was
+ * suspended by the hypervisor (for any reason).
+ * On bare metal this function should return the same as local_clock.
+ * Architectures and sub-architectures can override this.
+ */
+u64 __weak running_clock(void)
+{
+	return local_clock();
+}
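The new running_clock() is deliberately a __weak default, so an architecture that knows how long the hypervisor kept the guest de-scheduled can supply its own definition. A minimal sketch of such an override follows; hv_read_suspended_ns() is a hypothetical helper standing in for whatever platform mechanism reports that suspended time, not an existing kernel API:

/* arch code: a strong definition overrides the __weak one in kernel/sched/clock.c */
u64 running_clock(void)
{
	/*
	 * hv_read_suspended_ns() is a made-up placeholder for the platform's
	 * way of reporting how long the hypervisor kept this guest suspended.
	 */
	return local_clock() - hv_read_suspended_ns();
}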
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97fe79cf613e..feda520bd034 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3051,6 +3051,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	} else {
 		if (dl_prio(oldprio))
 			p->dl.dl_boosted = 0;
+		if (rt_prio(oldprio))
+			p->rt.timeout = 0;
 		p->sched_class = &fair_sched_class;
 	}
 
@@ -5412,9 +5414,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
 {
 	struct sched_group *group = sd->groups;
-	char str[256];
 
-	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
 	cpumask_clear(groupmask);
 
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5427,7 +5427,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n", str, sd->name);
+	printk(KERN_CONT "span %*pbl level %s\n",
+	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5472,9 +5473,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
-		printk(KERN_CONT " %s", str);
+		printk(KERN_CONT " %*pbl",
+		       cpumask_pr_args(sched_group_cpus(group)));
 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
 			printk(KERN_CONT " (cpu_capacity = %d)",
 				group->sgc->capacity);
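The %*pbl conversions above rely on cpumask_pr_args(), which expands to the bitmap width and pointer that vsnprintf() needs, so the mask is formatted directly by printk and the old on-stack str[256] buffer plus cpulist_scnprintf() step become unnecessary. A small illustrative sketch of the same idiom, using the real cpu_online_mask purely as an example mask:

	/* "%*pbl" prints a ranged CPU list such as "0-3,8"; "%*pb" prints the raw bitmap in hex. */
	pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));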
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index aaf1c1d5cf5d..80014a178342 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -7,6 +7,7 @@
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
+#include <linux/suspend.h>
 
 #include <asm/tlb.h>
 
@@ -81,6 +82,7 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;
 
 	/*
	 * Check if the idle task must be rescheduled. If it is the
@@ -104,25 +106,37 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
+	 * Suspend-to-idle ("freeze") is a system state in which all user space
+	 * has been frozen, all I/O devices have been suspended and the only
+	 * activity happens here and in interrupts (if any). In that case bypass
+	 * the cpuidle governor and go straight for the deepest idle state
+	 * available. Possibly also suspend the local tick and the entire
+	 * timekeeping to prevent timer interrupts from kicking us out of idle
+	 * until a proper wakeup interrupt happens.
 	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
-		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
-		 */
-		if (current_clr_polling_and_test())
+	if (idle_should_freeze()) {
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
 			local_irq_enable();
-		else
-			arch_cpu_idle();
+			goto exit_idle;
+		}
 
-		goto exit_idle;
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
+		/*
+		 * Ask the cpuidle framework to choose a convenient idle state.
+		 */
+		next_state = cpuidle_select(drv, dev);
 	}
-
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
 
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -167,7 +181,8 @@ use_default:
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);
 
 exit_idle:
 	__current_set_polling();
@@ -180,6 +195,19 @@ exit_idle:
 
 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }
 
 /*
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index a476bea17fbc..87e2c9f0c33e 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -15,11 +15,6 @@
 static int show_schedstat(struct seq_file *seq, void *v)
 {
 	int cpu;
-	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
-	char *mask_str = kmalloc(mask_len, GFP_KERNEL);
-
-	if (mask_str == NULL)
-		return -ENOMEM;
 
 	if (v == (void *)1) {
 		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
@@ -50,9 +45,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		for_each_domain(cpu, sd) {
 			enum cpu_idle_type itype;
 
-			cpumask_scnprintf(mask_str, mask_len,
-					  sched_domain_span(sd));
-			seq_printf(seq, "domain%d %s", dcount++, mask_str);
+			seq_printf(seq, "domain%d %*pb", dcount++,
+				   cpumask_pr_args(sched_domain_span(sd)));
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 			     itype++) {
 				seq_printf(seq, " %u %u %u %u %u %u %u %u",
@@ -76,7 +70,6 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		rcu_read_unlock();
 #endif
 	}
-	kfree(mask_str);
 	return 0;
 }
 
