Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  37
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3eedd5260907..6b02276baaa6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -232,10 +232,10 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
 static struct rt_rq *init_rt_rq_p[NR_CPUS];
 
-/* task_group_mutex serializes add/remove of task groups and also changes to
+/* task_group_lock serializes add/remove of task groups and also changes to
  * a task group's cpu shares.
  */
-static DEFINE_MUTEX(task_group_mutex);
+static DEFINE_SPINLOCK(task_group_lock);
 
 /* doms_cur_mutex serializes access to doms_cur[] array */
 static DEFINE_MUTEX(doms_cur_mutex);
@@ -295,16 +295,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.parent = task_group(p)->rt_se[cpu];
 }
 
-static inline void lock_task_group_list(void)
-{
-	mutex_lock(&task_group_mutex);
-}
-
-static inline void unlock_task_group_list(void)
-{
-	mutex_unlock(&task_group_mutex);
-}
-
 static inline void lock_doms_cur(void)
 {
 	mutex_lock(&doms_cur_mutex);
@@ -318,8 +308,6 @@ static inline void unlock_doms_cur(void)
 #else
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline void lock_task_group_list(void) { }
-static inline void unlock_task_group_list(void) { }
 static inline void lock_doms_cur(void) { }
 static inline void unlock_doms_cur(void) { }
 
@@ -7571,6 +7559,7 @@ struct task_group *sched_create_group(void)
 	struct rt_rq *rt_rq;
 	struct sched_rt_entity *rt_se;
 	struct rq *rq;
+	unsigned long flags;
 	int i;
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
@@ -7620,7 +7609,7 @@ struct task_group *sched_create_group(void)
 		init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
 	}
 
-	lock_task_group_list();
+	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		cfs_rq = tg->cfs_rq[i];
@@ -7629,7 +7618,7 @@ struct task_group *sched_create_group(void)
 		list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
 	}
 	list_add_rcu(&tg->list, &task_groups);
-	unlock_task_group_list();
+	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
 
@@ -7650,9 +7639,10 @@ void sched_destroy_group(struct task_group *tg)
 {
 	struct cfs_rq *cfs_rq = NULL;
 	struct rt_rq *rt_rq = NULL;
+	unsigned long flags;
 	int i;
 
-	lock_task_group_list();
+	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i) {
 		cfs_rq = tg->cfs_rq[i];
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
@@ -7660,7 +7650,7 @@ void sched_destroy_group(struct task_group *tg)
 		list_del_rcu(&rt_rq->leaf_rt_rq_list);
 	}
 	list_del_rcu(&tg->list);
-	unlock_task_group_list();
+	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	BUG_ON(!cfs_rq);
 
@@ -7728,13 +7718,16 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
 	}
 }
 
+static DEFINE_MUTEX(shares_mutex);
+
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 	struct cfs_rq *cfs_rq;
 	struct rq *rq;
+	unsigned long flags;
 
-	lock_task_group_list();
+	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
 		goto done;
 
@@ -7746,10 +7739,12 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	 * load_balance_fair) from referring to this group first,
 	 * by taking it off the rq->leaf_cfs_rq_list on each cpu.
 	 */
+	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i) {
 		cfs_rq = tg->cfs_rq[i];
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}
+	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	/* wait for any ongoing reference to this group to finish */
 	synchronize_sched();
@@ -7769,13 +7764,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	 * Enable load balance activity on this group, by inserting it back on
 	 * each cpu's rq->leaf_cfs_rq_list.
 	 */
+	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		cfs_rq = tg->cfs_rq[i];
 		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 	}
+	spin_unlock_irqrestore(&task_group_lock, flags);
 done:
-	unlock_task_group_list();
+	mutex_unlock(&shares_mutex);
 	return 0;
 }
 