author		Ingo Molnar <mingo@kernel.org>	2014-06-06 01:55:06 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-06-06 01:55:06 -0400
commit		ec00010972a0971b2c1da4fbe4e5c7d8ed1ecb05 (patch)
tree		c28975d7daf6d8a3aa23afe8f42837b71105b269 /kernel/sched
parent		8c6e549a447c51f4f8c0ba7f1e444469f75a354a (diff)
parent		e041e328c4b41e1db79bfe5ba9992c2ed771ad19 (diff)
Merge branch 'perf/urgent' into perf/core, to resolve conflict and to prepare for new patches

Conflicts:
	arch/x86/kernel/traps.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c		80
-rw-r--r--	kernel/sched/cpudeadline.c	37
-rw-r--r--	kernel/sched/cpudeadline.h	6
-rw-r--r--	kernel/sched/cpupri.c		10
-rw-r--r--	kernel/sched/cpupri.h		2
-rw-r--r--	kernel/sched/cputime.c		32
-rw-r--r--	kernel/sched/deadline.c		5
-rw-r--r--	kernel/sched/fair.c		16
8 files changed, 121 insertions, 67 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 00781cc38047..a8c0fde25e4a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2594,8 +2594,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
-		if (likely(p && p != RETRY_TASK))
-			return p;
+		if (unlikely(p == RETRY_TASK))
+			goto again;
+
+		/* assumes fair_sched_class->next == idle_sched_class */
+		if (unlikely(!p))
+			p = idle_sched_class.pick_next_task(rq, prev);
+
+		return p;
 	}
 
 again:
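The fast path above works only because idle is the scheduling class ordered directly below fair, as the new comment notes: when the fair pick comes back empty, the picker can fall through to idle without restarting the full class walk, and only RETRY_TASK forces the slow path. A stand-alone C sketch of that pick-with-fallback shape (all names and types here are illustrative, not the kernel's):

#include <stdio.h>

/* Illustrative stand-ins; the kernel's task_struct and sched classes
 * are far richer. RETRY_TASK mirrors the sentinel used above. */
struct task { const char *name; };
#define RETRY_TASK ((struct task *)-1)

static struct task idle_task = { "idle" };

static struct task *fair_pick(void) { return NULL; } /* fair class empty */
static struct task *idle_pick(void) { return &idle_task; }

static struct task *pick_next(void)
{
	struct task *p = fair_pick();

	if (p == RETRY_TASK)
		return NULL;	/* caller restarts the full class walk */

	/* fall through to idle, the class right below fair */
	if (!p)
		p = idle_pick();

	return p;
}

int main(void)
{
	printf("picked: %s\n", pick_next()->name);
	return 0;
}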
@@ -2743,7 +2749,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2753,7 +2759,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2785,7 +2791,7 @@ void __sched schedule_preempt_disabled(void)
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2816,7 +2822,7 @@ EXPORT_SYMBOL(preempt_schedule);
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
@@ -3127,6 +3133,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3191,17 +3198,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
 * We ask for the deadline not being zero, and greater or equal
 * than the runtime, as well as the period of being zero or
 * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution (1us); we
- * check sched_runtime only since it is always the smaller one.
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
 */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-	return attr && attr->sched_deadline != 0 &&
-		(attr->sched_period == 0 ||
-		(s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
-		(s64)(attr->sched_deadline - attr->sched_runtime) >= 0 &&
-		attr->sched_runtime >= (2 << (DL_SCALE - 1));
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
 }
 
 /*
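For reference, DL_SCALE was 10 in kernel/sched/sched.h at the time, so the runtime floor 1ULL << DL_SCALE is 1024ns, about 1us; the removed expression 2 << (DL_SCALE - 1) encoded the same bound less readably. A user-space transcription of the new checks, exercised on two sample parameter sets (the DL_SCALE value is the only assumption):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DL_SCALE 10	/* assumed to match kernel/sched/sched.h */

/* User-space copy of the __checkparam_dl() rules shown above. */
static bool checkparam_dl(uint64_t runtime, uint64_t deadline, uint64_t period)
{
	if (deadline == 0)
		return false;
	if (runtime < (1ULL << DL_SCALE))	/* >= ~1us */
		return false;
	if ((deadline & (1ULL << 63)) || (period & (1ULL << 63)))
		return false;			/* MSB is reserved */
	if ((period != 0 && period < deadline) || deadline < runtime)
		return false;			/* runtime <= deadline <= period */
	return true;
}

int main(void)
{
	/* 10ms every 100ms with a 30ms deadline: accepted (prints 1) */
	printf("%d\n", checkparam_dl(10000000ULL, 30000000ULL, 100000000ULL));
	/* runtime exceeds deadline: rejected (prints 0) */
	printf("%d\n", checkparam_dl(40000000ULL, 30000000ULL, 100000000ULL));
	return 0;
}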
@@ -3642,6 +3672,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
 */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 			       unsigned int, flags)
@@ -3653,8 +3684,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
-	if (sched_copy_attr(uattr, &attr))
-		return -EFAULT;
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if (attr.sched_policy < 0)
+		return -EINVAL;
 
 	rcu_read_lock();
 	retval = -ESRCH;
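Both changes in this hunk are visible to callers: sched_copy_attr()'s own error code (e.g. -E2BIG for a bad size field) is no longer flattened to -EFAULT, and a negative sched_policy is rejected up front. A sketch of how user space reaches this path; the struct layout is transcribed from the uapi header, and the raw syscall number is an x86_64 assumption since glibc ships no wrapper:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Minimal sched_attr, following the uapi layout. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

#ifndef SYS_sched_setattr
#define SYS_sched_setattr 314	/* x86_64; assumption, check your arch */
#endif

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = 6;			/* SCHED_DEADLINE */
	attr.sched_runtime  = 10 * 1000 * 1000;	/* 10ms  */
	attr.sched_deadline = 30 * 1000 * 1000;	/* 30ms  */
	attr.sched_period   = 100 * 1000 * 1000;/* 100ms */

	/* flags must be 0, as the hunk above enforces */
	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}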
@@ -3704,7 +3739,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
 	struct task_struct *p;
 	int retval;
 
@@ -3721,11 +3756,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	if (retval)
 		goto out_unlock;
 
-	if (task_has_dl_policy(p)) {
-		retval = -EINVAL;
-		goto out_unlock;
-	}
-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
 	rcu_read_unlock();
 
 	/*
@@ -3786,6 +3818,7 @@ err_size:
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
 */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 		unsigned int, size, unsigned int, flags)
@@ -5046,7 +5079,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
@@ -6020,6 +6052,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					,
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+		.max_newidle_lb_cost	= 0,
+		.next_decay_max_lb_cost	= jiffies,
 	};
 	SD_INIT_NAME(sd, NUMA);
 	sd->private = &tl->data;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b9bb42b2d47..bd95963dae80 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -13,6 +13,7 @@
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
 	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-	swap(cp->elements[a], cp->elements[b]);
-	swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+	swap(cp->elements[a].cpu, cp->elements[b].cpu);
+	swap(cp->elements[a].dl , cp->elements[b].dl );
+
+	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-	old_idx = cp->cpu_to_idx[cpu];
+	old_idx = cp->elements[cpu].idx;
 	if (!is_valid) {
 		/* remove item */
 		if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
 		cp->size--;
-		cp->cpu_to_idx[new_cpu] = old_idx;
-		cp->cpu_to_idx[cpu] = IDX_INVALID;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
 		while (old_idx > 0 && dl_time_before(
 				cp->elements[parent(old_idx)].dl,
 				cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->size++;
 		cp->elements[cp->size - 1].dl = 0;
 		cp->elements[cp->size - 1].cpu = cpu;
-		cp->cpu_to_idx[cpu] = cp->size - 1;
+		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
 		cpumask_clear_cpu(cpu, cp->free_cpus);
 	} else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
 	memset(cp, 0, sizeof(*cp));
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
-	for (i = 0; i < NR_CPUS; i++)
-		cp->cpu_to_idx[i] = IDX_INVALID;
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+	cp->elements = kcalloc(nr_cpu_ids,
+			       sizeof(struct cpudl_item),
+			       GFP_KERNEL);
+	if (!cp->elements)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+		kfree(cp->elements);
 		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i)
+		cp->elements[i].idx = IDX_INVALID;
+
 	cpumask_setall(cp->free_cpus);
 
 	return 0;
@@ -210,7 +224,6 @@ int cpudl_init(struct cpudl *cp)
 */
 void cpudl_cleanup(struct cpudl *cp)
 {
-	/*
-	 * nothing to do for the moment
-	 */
+	free_cpumask_var(cp->free_cpus);
+	kfree(cp->elements);
 }
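The old pair of NR_CPUS arrays (cpu_to_idx[] plus elements[]) collapses into one kcalloc'd elements array that is indexed two ways: by heap slot for .cpu and .dl, and by CPU number for .idx. A toy exchange that preserves that reverse-map invariant (illustrative types; SWAP uses GCC's typeof):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature of the cpudl heap item after this patch. */
struct item { uint64_t dl; int cpu; int idx; };

#define SWAP(a, b) do { typeof(a) t = (a); (a) = (b); (b) = t; } while (0)

/* Swap heap slots a and b while keeping elements[cpu].idx pointing at
 * the slot that currently holds that cpu, the invariant the patched
 * cpudl_exchange() maintains. */
static void exchange(struct item *el, int a, int b)
{
	int cpu_a = el[a].cpu, cpu_b = el[b].cpu;

	SWAP(el[a].cpu, el[b].cpu);
	SWAP(el[a].dl, el[b].dl);
	SWAP(el[cpu_a].idx, el[cpu_b].idx);
}

int main(void)
{
	struct item el[2] = { { 100, 0, 0 }, { 200, 1, 1 } };

	exchange(el, 0, 1);
	/* cpu 0 now lives in heap slot 1, and el[0].idx records that */
	printf("slot0: cpu=%d dl=%llu, cpu0 at idx=%d\n",
	       el[0].cpu, (unsigned long long)el[0].dl, el[0].idx);
	return 0;
}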
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index a202789a412c..538c9796ad4a 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -5,17 +5,17 @@
 
 #define IDX_INVALID -1
 
-struct array_item {
+struct cpudl_item {
 	u64 dl;
 	int cpu;
+	int idx;
 };
 
 struct cpudl {
 	raw_spinlock_t lock;
 	int size;
-	int cpu_to_idx[NR_CPUS];
-	struct array_item elements[NR_CPUS];
 	cpumask_var_t free_cpus;
+	struct cpudl_item *elements;
 };
 
 
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..8834243abee2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/slab.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -70,8 +71,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
 
-	if (task_pri >= MAX_RT_PRIO)
-		return 0;
+	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
@@ -219,8 +219,13 @@ int cpupri_init(struct cpupri *cp)
 			goto cleanup;
 	}
 
+	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+	if (!cp->cpu_to_pri)
+		goto cleanup;
+
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+
 	return 0;
 
 cleanup:
@@ -237,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp)
 {
 	int i;
 
+	kfree(cp->cpu_to_pri);
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
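cpupri_init() now mirrors cpudl_init(): per-CPU state is sized by nr_cpu_ids at runtime instead of being baked in as a NR_CPUS array, released in the matching cleanup, and unwound through the existing cleanup label on failure. A minimal user-space skeleton of the pairing, with calloc/free standing in for kcalloc/kfree and illustrative names throughout:

#include <stdlib.h>

/* Illustrative context struct; the kernel's cpupri carries more state. */
struct ctx { int *cpu_to_pri; };

static int ctx_init(struct ctx *cp, unsigned int nr_cpu_ids)
{
	cp->cpu_to_pri = calloc(nr_cpu_ids, sizeof(int));
	if (!cp->cpu_to_pri)
		return -1;			/* -ENOMEM in the kernel */
	for (unsigned int i = 0; i < nr_cpu_ids; i++)
		cp->cpu_to_pri[i] = -1;		/* CPUPRI_INVALID */
	return 0;
}

static void ctx_cleanup(struct ctx *cp)
{
	free(cp->cpu_to_pri);			/* kfree() in the kernel */
}

int main(void)
{
	struct ctx cp;

	if (ctx_init(&cp, 8) == 0)
		ctx_cleanup(&cp);
	return 0;
}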
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index f6d756173491..6b033347fdfd 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -17,7 +17,7 @@ struct cpupri_vec {
 
 struct cpupri {
 	struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
-	int cpu_to_pri[NR_CPUS];
+	int *cpu_to_pri;
 };
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a95097cb4591..72fdf06ef865 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -332,50 +332,50 @@ out:
 * softirq as those do not count in task exec_runtime any more.
 */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-					 struct rq *rq)
+					 struct rq *rq, int ticks)
 {
-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+	u64 cputime = (__force u64) cputime_one_jiffy;
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	if (steal_account_process_tick())
 		return;
 
+	cputime *= ticks;
+	scaled *= ticks;
+
 	if (irqtime_account_hi_update()) {
-		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_IRQ] += cputime;
 	} else if (irqtime_account_si_update()) {
-		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_SOFTIRQ] += cputime;
 	} else if (this_cpu_ksoftirqd() == p) {
 		/*
 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
 		 * So, we have to handle it separately here.
 		 * Also, p->stime needs to be updated for ksoftirqd.
 		 */
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					CPUTIME_SOFTIRQ);
+		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
 	} else if (user_tick) {
-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime, scaled);
 	} else if (p == rq->idle) {
-		account_idle_time(cputime_one_jiffy);
+		account_idle_time(cputime);
 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
-		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_guest_time(p, cputime, scaled);
 	} else {
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					CPUTIME_SYSTEM);
+		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
 	}
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-	int i;
 	struct rq *rq = this_rq();
 
-	for (i = 0; i < ticks; i++)
-		irqtime_account_process_tick(current, 0, rq);
+	irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-						struct rq *rq) {}
+						struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 		return;
 
 	if (sched_clock_irqtime) {
-		irqtime_account_process_tick(p, user_tick, rq);
+		irqtime_account_process_tick(p, user_tick, rq, 1);
 		return;
 	}
 
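The accounting rewrite replaces "call once per tick, add one jiffy each time" with "multiply one jiffy by the tick count, add once", so irqtime_account_idle_ticks() no longer loops and the regular tick path simply passes ticks=1. The arithmetic equivalence is trivial but worth pinning down (the jiffy value below assumes HZ=100):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t cputime_one_jiffy = 10000000;	/* 10ms if HZ=100 */
	const int ticks = 7;
	uint64_t looped = 0;

	/* old shape: one accounting pass per tick */
	for (int i = 0; i < ticks; i++)
		looped += cputime_one_jiffy;

	/* new shape: one pass, pre-scaled by the tick count */
	assert(looped == cputime_one_jiffy * (uint64_t)ticks);
	return 0;
}

One subtlety the batching does change, visible in the hunk itself: per-call checks such as steal_account_process_tick() now run once per batch rather than once per tick.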
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	sched_clock_tick();
 	update_rq_clock(rq);
 	dl_se->dl_throttled = 0;
+	dl_se->dl_yielded = 0;
 	if (p->on_rq) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 		if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
 	 * We make the task go to sleep until its current deadline by
 	 * forcing its runtime to zero. This way, update_curr_dl() stops
 	 * it and the bandwidth timer will wake it up and will give it
-	 * new scheduling parameters (thanks to dl_new=1).
+	 * new scheduling parameters (thanks to dl_yielded=1).
 	 */
 	if (p->dl.runtime > 0) {
-		rq->curr->dl.dl_new = 1;
+		rq->curr->dl.dl_yielded = 1;
 		p->dl.runtime = 0;
 	}
 	update_curr_dl(rq);
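With dl_yielded in place, a deadline task that calls sched_yield() surrenders whatever runtime remains in the current period and is woken by the bandwidth timer with a fresh budget, rather than being re-initialized through the dl_new path. The user-space idiom for a periodic deadline task is unchanged, sketched here (assumes the task was already switched to SCHED_DEADLINE, e.g. as in the sched_setattr example earlier):

#include <sched.h>

static void do_periodic_work(void) { /* hypothetical job body */ }

int main(void)
{
	for (int i = 0; i < 100; i++) {
		do_periodic_work();
		sched_yield();	/* sleep until the next replenishment */
	}
	return 0;
}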
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
 	int this_cpu = this_rq->cpu;
 
 	idle_enter_fair(this_rq);
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
 	raw_spin_lock(&this_rq->lock);
 
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
 	/*
-	 * While browsing the domains, we released the rq lock.
-	 * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task) {
+	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
-		goto out;
-	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->next_balance = next_balance;
 	}
 
-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
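Hoisting the max_idle_balance_cost update above the "task got enqueued meanwhile" check matters because that check no longer jumps to out: it now just pretends a task was pulled, and the cost of the domain walk gets recorded on every path instead of only when the function ran to the end. A toy reduction of that control-flow fix (names are illustrative):

#include <stdio.h>

static int max_cost;

/* After the patch: record the cost before any early-bail logic,
 * so bailing out can no longer skip the bookkeeping. */
static void balance(int curr_cost, int task_appeared)
{
	if (curr_cost > max_cost)
		max_cost = curr_cost;

	if (task_appeared)
		return;		/* the old code jumped past the update here */

	/* ... remainder of the balancing pass ... */
}

int main(void)
{
	balance(5, 1);
	printf("max_cost=%d\n", max_cost);	/* prints 5, not 0 */
	return 0;
}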
