Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c        | 28
-rw-r--r--	kernel/sched/cpudeadline.c |  6
-rw-r--r--	kernel/sched/deadline.c    | 20
-rw-r--r--	kernel/sched/fair.c        | 10
-rw-r--r--	kernel/sched/rt.c          |  8
-rw-r--r--	kernel/sched/sched.h       |  1
6 files changed, 42 insertions, 31 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..6edbef296ece 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 {
 
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period;
+	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
@@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  */
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+			       unsigned int, flags)
 {
 	struct sched_attr attr;
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0)
+	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
 	if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 		attr->size = usize;
 	}
 
-	ret = copy_to_user(uattr, attr, usize);
+	ret = copy_to_user(uattr, attr, attr->size);
 	if (ret)
 		return -EFAULT;
 
@@ -3804,8 +3805,8 @@ err_size:
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
  */
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, size, unsigned int, flags)
 {
 	struct sched_attr attr = {
 		.size = sizeof(struct sched_attr),
@@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0)
+	    size < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
 	int cpu, ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		if (ret)
 			break;
@@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
 	int cpu;
+	unsigned long flags;
 
 	def_dl_bandwidth.dl_period = global_rt_period();
 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
 }
 
@@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
 		return -EINVAL;
 
 	return 0;
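
A minimal userspace sketch of the updated syscall ABI (an illustration only, assuming Linux 3.14+ headers that provide __NR_sched_setattr/__NR_sched_getattr; struct sched_attr is declared locally to mirror the uapi layout): the new trailing flags argument must be zero or both calls return -EINVAL, and leaving sched_period at zero exercises the "sched_period ?: sched_deadline" fallback added to dl_overflow() above.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns; 0 = use sched_deadline */
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms every ... */
		.sched_deadline	= 30 * 1000 * 1000,	/* ... 30 ms */
		/* sched_period left 0: kernel falls back to sched_deadline */
	};

	/* last argument is the new 'flags'; anything but 0 gets -EINVAL */
	if (syscall(__NR_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");

	if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr), 0))
		perror("sched_getattr");
	else
		printf("policy %u, runtime %llu ns\n", attr.sched_policy,
		       (unsigned long long)attr.sched_runtime);
	return 0;
}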
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+	WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
 	return best_cpu;
 }
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	int old_idx, new_cpu;
 	unsigned long flags;
 
-	WARN_ON(cpu > num_present_cpus());
+	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
 	old_idx = cp->cpu_to_idx[cpu];
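
The cpudeadline.c hunks replace comparisons against num_present_cpus() with cpu_present(): when the present mask is sparse, an index can pass the old bound check yet name a CPU that is not present, while a high present CPU can trip it spuriously. A standalone illustration in plain userspace C, using a made-up 8-slot mask rather than the kernel's cpumask API:

#include <stdbool.h>
#include <stdio.h>

/* pretend CPUs 0, 1, 4 and 5 are present: num_present_cpus() == 4 */
static const bool present[8] = { 1, 1, 0, 0, 1, 1, 0, 0 };

static int num_present_cpus(void)
{
	int i, n = 0;

	for (i = 0; i < 8; i++)
		n += present[i];
	return n;
}

int main(void)
{
	int hole = 3;	/* not present, but 3 > 4 is false: old WARN_ON missed it */
	int high = 5;	/* present, but 5 > 4 is true: old WARN_ON fired spuriously */

	printf("old check: hole=%d high=%d\n",
	       hole > num_present_cpus(), high > num_present_cpus());
	printf("new check: hole=%d high=%d\n", !present[hole], !present[high]);
	return 0;
}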
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
 
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -135,9 +135,7 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total++;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
 
@@ -147,9 +145,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
-	dl_rq->dl_nr_total--;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
 
@@ -566,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -629,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
@@ -717,6 +717,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
+	inc_nr_running(rq_of_dl_rq(dl_rq));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +731,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
+	dec_nr_running(rq_of_dl_rq(dl_rq));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +838,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +850,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
-
-	dec_nr_running(rq);
 }
 
 /*
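
The update_curr_dl() change above stops charging -deadline runtime to the rt_rq once nothing can replenish it: rt_time is only bumped while the RT period timer is active or rt_time is still below rt_runtime (the sched_rt_bandwidth_account() helper added in rt.c below). A userspace illustration of that gate with assumed numbers, not kernel code:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long long rt_runtime = 950000000ULL;	/* 950 ms of RT budget */
	unsigned long long rt_time = 0;
	bool period_timer_active = false;	/* no RT task queued, no replenishment */
	int tick;

	for (tick = 0; tick < 1000; tick++) {
		unsigned long long delta_exec = 10000000ULL;	/* 10 ms of DL runtime */

		/* mirror of sched_rt_bandwidth_account(): account only while it matters */
		if (period_timer_active || rt_time < rt_runtime)
			rt_time += delta_exec;
	}

	/* saturates around rt_runtime instead of growing toward ~10 s */
	printf("rt_time = %llu ns\n", rt_time);
	return 0;
}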
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2bfcb77..9b4c4f320130 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
 			start = end;
 			if (pages <= 0)
 				goto out;
+
+			cond_resched();
 		} while (end != vma->vm_end);
 	}
 
@@ -6999,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
-	 * Ensure the task's vruntime is normalized, so that when its
+	 * Ensure the task's vruntime is normalized, so that when it's
 	 * switched back to the fair class the enqueue_entity(.flags=0) will
 	 * do the right thing.
 	 *
-	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
-	 * have normalized the vruntime, if it was !on_rq, then only when
+	 * If it's on_rq, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime, if it's !on_rq, then only when
 	 * the task is sleeping will it still have non-normalized vruntime.
 	 */
-	if (!se->on_rq && p->state != TASK_RUNNING) {
+	if (!p->on_rq && p->state != TASK_RUNNING) {
 		/*
 		 * Fix up our vruntime so that the current sleep doesn't
 		 * cause 'unlimited' sleep bonus.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..1999021042c7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+	return (hrtimer_active(&rt_b->rt_period_timer) ||
+		rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..f964add50f38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
 	} earliest_dl;
 
 	unsigned long dl_nr_migratory;
-	unsigned long dl_nr_total;
 	int overloaded;
 
 	/*