-rw-r--r--  kernel/sched/core.c       5
-rw-r--r--  kernel/sched/deadline.c  10
-rw-r--r--  kernel/sched/fair.c      15
-rw-r--r--  kernel/sched/sched.h      2
4 files changed, 20 insertions, 12 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0a7251678982..084d17f89139 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3685,7 +3685,7 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (retval)
 		return retval;
 
-	if (attr.sched_policy < 0)
+	if ((int)attr.sched_policy < 0)
 		return -EINVAL;
 
 	rcu_read_lock();
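
Note on the sched_setattr() hunk above: sched_attr.sched_policy is an unsigned field, so the old attr.sched_policy < 0 test can never be true and a negative policy passed in from userspace would slip through; the (int) cast makes the sign check meaningful. A minimal standalone sketch of the pitfall (illustrative only, not kernel code):

/*
 * Comparing an unsigned value with "< 0" is always false; casting to a
 * signed type first is what lets the bogus value be detected.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t policy = (uint32_t)-1;		/* userspace handed us -1 */

	printf("policy < 0      -> %d\n", policy < 0);		/* prints 0 */
	printf("(int)policy < 0 -> %d\n", (int)policy < 0);	/* prints 1 */
	return 0;
}
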
@@ -7751,8 +7751,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	/* restart the period timer (if active) to handle new period expiry */
 	if (runtime_enabled && cfs_b->timer_active) {
 		/* force a reprogram */
-		cfs_b->timer_active = 0;
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, true);
 	}
 	raw_spin_unlock_irq(&cfs_b->lock);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 800e99b99075..14bc348ba3b4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -513,9 +513,17 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
-	struct rq *rq = task_rq(p);
+	struct rq *rq;
+again:
+	rq = task_rq(p);
 	raw_spin_lock(&rq->lock);
 
+	if (rq != task_rq(p)) {
+		/* Task was moved, retrying. */
+		raw_spin_unlock(&rq->lock);
+		goto again;
+	}
+
 	/*
 	 * We need to take care of a possible races here. In fact, the
 	 * task might have changed its scheduling policy to something
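
Note on the dl_task_timer() hunk above: the timer handler cannot assume the task is still on the runqueue it read before taking the lock, so it locks the rq it saw and then re-checks task_rq(p), retrying if the task migrated in between. A minimal user-space sketch of that lock-then-revalidate pattern (hypothetical queue/task types, not kernel code):

#include <pthread.h>

struct queue { pthread_mutex_t lock; };
struct task  { struct queue *q; };	/* q may be changed by other threads */

static struct queue *lock_task_queue(struct task *t)
{
	struct queue *q;
again:
	q = t->q;			/* snapshot the queue we think the task is on */
	pthread_mutex_lock(&q->lock);
	if (q != t->q) {		/* task moved while we waited for the lock */
		pthread_mutex_unlock(&q->lock);
		goto again;		/* retry against the task's new queue */
	}
	return q;			/* caller releases q->lock when done */
}
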
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fdb96de81a5..8cbe2d2c16de 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1707,18 +1707,19 @@ no_join:
 void task_numa_free(struct task_struct *p)
 {
 	struct numa_group *grp = p->numa_group;
-	int i;
 	void *numa_faults = p->numa_faults_memory;
+	unsigned long flags;
+	int i;
 
 	if (grp) {
-		spin_lock_irq(&grp->lock);
+		spin_lock_irqsave(&grp->lock, flags);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock_irq(&grp->lock);
+		spin_unlock_irqrestore(&grp->lock, flags);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
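
Note on the task_numa_free() hunk above: switching from the plain _irq lock/unlock pair to _irqsave/_irqrestore matters when the function can run with interrupts already disabled, because spin_unlock_irq() re-enables interrupts unconditionally while spin_unlock_irqrestore() puts back whatever state the caller had. A toy user-space model of that difference (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_on = true;			/* stand-in for the CPU interrupt flag */

static void unlock_irq(void)          { irqs_on = true; }	/* _irq: force-enable */
static void unlock_irqrestore(bool f) { irqs_on = f; }		/* _irqrestore: put back */

int main(void)
{
	bool flags;

	irqs_on = false;			/* caller already has "interrupts" off */
	unlock_irq();
	printf("_irq pair:     irqs %s\n", irqs_on ? "on (wrong)" : "off");

	irqs_on = false;
	flags = irqs_on;			/* what the irqsave variant remembers */
	unlock_irqrestore(flags);
	printf("_irqsave pair: irqs %s\n", irqs_on ? "on" : "off (correct)");
	return 0;
}
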
@@ -3129,7 +3130,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 		 */
 		if (!cfs_b->timer_active) {
 			__refill_cfs_bandwidth_runtime(cfs_b);
-			__start_cfs_bandwidth(cfs_b);
+			__start_cfs_bandwidth(cfs_b, false);
 		}
 
 		if (cfs_b->runtime > 0) {
@@ -3308,7 +3309,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	if (!cfs_b->timer_active)
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3690,7 +3691,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 }
 
 /* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 {
 	/*
 	 * The timer may be active because we're trying to set a new bandwidth
@@ -3705,7 +3706,7 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 		cpu_relax();
 		raw_spin_lock(&cfs_b->lock);
 		/* if someone else restarted the timer then we're done */
-		if (cfs_b->timer_active)
+		if (!force && cfs_b->timer_active)
 			return;
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 456e492a3dca..369b4d663c42 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -278,7 +278,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);