Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 57 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..0a7251678982 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
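Why __visible: every "asmlinkage void" that gains the annotation in this patch (schedule_tail, schedule, schedule_user, preempt_schedule, preempt_schedule_irq) is entered from assembly, so the compiler never sees a C caller. Under LTO or aggressive IPA optimization, gcc may localize, clone, or drop such symbols and break the entry points. A sketch of what the macro expands to in the gcc compiler headers of this era (the exact guard and header may differ by kernel version):

	/* include/linux/compiler-gcc4.h (sketch) */
	#if GCC_VERSION >= 40600
	/*
	 * Tell the optimizer the symbol is referenced from outside this
	 * translation unit (e.g. from assembly), so it must stay
	 * externally visible and may not be cloned or eliminated.
	 */
	#define __visible	__attribute__((externally_visible))
	#endif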
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
-		if (likely(p && p != RETRY_TASK))
-			return p;
+		if (unlikely(p == RETRY_TASK))
+			goto again;
+
+		/* assumes fair_sched_class->next == idle_sched_class */
+		if (unlikely(!p))
+			p = idle_sched_class.pick_next_task(rq, prev);
+
+		return p;
 	}
 
 again:
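The pick_next_task() change: previously, when the fair class's fast path returned NULL (no runnable CFS task), the code fell through to the full class walk and redundantly re-queried every class from the top. The new code handles the empty case inline by asking the idle class directly, which is only valid because idle is the class immediately below fair in priority order, hence the "assumes fair_sched_class->next == idle_sched_class" comment. For reference, the slow path the fast path short-circuits looks roughly like this (reconstructed from the surrounding code, not part of this hunk):

	again:
		for_each_class(class) {
			p = class->pick_next_task(rq, prev);
			if (p) {
				if (unlikely(p == RETRY_TASK))
					goto again;
				return p;
			}
		}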
@@ -2741,7 +2747,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2757,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2789,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2819,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
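On dl_yielded: the flag is set when a SCHED_DEADLINE task gives up the rest of its runtime in the current period via sched_yield(); resetting it in __setparam_dl() keeps a stale yield from leaking into the task's next parameter set. The flag's producer in kernel/sched/deadline.c looks approximately like this (quoted from memory of that era's source, so treat it as a sketch):

	static void yield_task_dl(struct rq *rq)
	{
		struct task_struct *p = rq->curr;

		/*
		 * Forcing the remaining runtime to zero makes
		 * update_curr_dl() throttle the task until the
		 * replenishment timer fires at its next period;
		 * dl_yielded records why it was stopped.
		 */
		if (p->dl.runtime > 0) {
			rq->curr->dl.dl_yielded = 1;
			p->dl.runtime = 0;
		}
		update_curr_dl(rq);
	}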
@@ -3188,17 +3195,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
  * We ask for the deadline not being zero, and greater or equal
  * than the runtime, as well as the period of being zero or
  * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution (1us); we
- * check sched_runtime only since it is always the smaller one.
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
  */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-	return attr && attr->sched_deadline != 0 &&
-		(attr->sched_period == 0 ||
-		 (s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
-		(s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 &&
-		attr->sched_runtime >= (2 << (DL_SCALE - 1));
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
 }
 
 /*
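Besides being far more readable, the __checkparam_dl() rewrite closes a real hole: the old one-expression check compared u64 parameters via signed differences, which wrap once an operand has bit 63 set, so a crafted sched_deadline above 2^63 could slip past the "period >= deadline" test. (The runtime floor also changes only cosmetically: 2 << (DL_SCALE - 1) equals 1ULL << DL_SCALE, now written legibly and in a 64-bit type.) A minimal standalone demonstration of the wrap (hypothetical userspace snippet, not kernel code):

	/* gcc -o dl_wrap dl_wrap.c && ./dl_wrap */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t period   = 100;			/* tiny */
		uint64_t deadline = (1ULL << 63) + 200;		/* MSB set */

		/*
		 * The old "period >= deadline" test as a signed
		 * difference: 100 - (2^63 + 200) wraps to 2^63 - 100,
		 * a *positive* s64, so the bogus pair is accepted.
		 */
		if ((int64_t)(period - deadline) >= 0)
			printf("accepted period=%llu deadline=%llu ?!\n",
			       (unsigned long long)period,
			       (unsigned long long)deadline);
		return 0;
	}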
@@ -3639,6 +3669,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 			       unsigned int, flags)
@@ -3650,8 +3681,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
-	if (sched_copy_attr(uattr, &attr))
-		return -EFAULT;
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if (attr.sched_policy < 0)
+		return -EINVAL;
 
 	rcu_read_lock();
 	retval = -ESRCH;
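Two user-visible fixes here: sched_copy_attr() can fail for reasons other than a bad pointer (for example an over-sized struct with non-zero tail bytes), and its specific error code is now propagated instead of being flattened to -EFAULT; and a policy value that is negative when read as a signed int is rejected up front with -EINVAL. A hypothetical userspace probe, assuming the libc headers expose SYS_sched_setattr and using a local mirror of the uapi struct (glibc ships no wrapper in this era):

	#define _GNU_SOURCE
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* local mirror of the uapi struct sched_attr layout */
	struct ksched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
		uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
		uint64_t sched_runtime;		/* SCHED_DEADLINE */
		uint64_t sched_deadline;
		uint64_t sched_period;
	};

	int main(void)
	{
		struct ksched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy = (uint32_t)-1;	/* negative as signed */

		/* pid 0 = calling thread; expect EINVAL after the fix */
		if (syscall(SYS_sched_setattr, 0, &attr, 0) < 0)
			perror("sched_setattr");
		return 0;
	}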
@@ -3701,7 +3736,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
 	struct task_struct *p;
 	int retval;
 
@@ -3718,11 +3753,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	if (retval)
 		goto out_unlock;
 
-	if (task_has_dl_policy(p)) {
-		retval = -EINVAL;
-		goto out_unlock;
-	}
-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
 	rcu_read_unlock();
 
 	/*
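Taken together with the zero-initialization of lp above, this stops sched_getparam() from failing with -EINVAL on SCHED_DEADLINE tasks: like SCHED_NORMAL, they now report a static priority of 0, and lp never carries uninitialized stack bytes back to userspace. A quick userspace check (hypothetical, querying the current task):

	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct sched_param sp;

		/*
		 * For a SCHED_DEADLINE task this used to fail with
		 * EINVAL; after the change it succeeds and reports
		 * sched_priority == 0, just as for SCHED_NORMAL.
		 */
		if (sched_getparam(getpid(), &sp) == 0)
			printf("static priority: %d\n", sp.sched_priority);
		else
			perror("sched_getparam");
		return 0;
	}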
@@ -3783,6 +3815,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 		unsigned int, size, unsigned int, flags)
@@ -5043,7 +5076,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
 			unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
@@ -6017,6 +6049,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					,
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+		.max_newidle_lb_cost	= 0,
+		.next_decay_max_lb_cost	= jiffies,
 	};
 	SD_INIT_NAME(sd, NUMA);
 	sd->private = &tl->data;
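This brings the NUMA levels in line with the regular domain initializer, which already sets both newidle-balance bookkeeping fields; in particular, next_decay_max_lb_cost must start at jiffies so the periodic decay is scheduled sensibly from the first balance. Their consumer in the regular balance path of kernel/sched/fair.c reads approximately as follows (reconstructed sketch, not part of this patch):

	/* rebalance_domains() (sketch) */
	if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
		/*
		 * Decay the recorded newidle max cost by ~1% per second
		 * so it does not stay stuck at a one-off outlier.
		 */
		sd->max_newidle_lb_cost =
			(sd->max_newidle_lb_cost * 253) / 256;
		sd->next_decay_max_lb_cost = jiffies + HZ;
	}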