Diffstat (limited to 'kernel/sched/fair.c')

 kernel/sched/fair.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7e9bd0b1fa9e..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
 	/* If the task is part of a group prevent parallel updates to group stats */
 	if (p->numa_group) {
 		group_lock = &p->numa_group->lock;
-		spin_lock(group_lock);
+		spin_lock_irq(group_lock);
 	}
 
 	/* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
 			}
 		}
 
-		spin_unlock(group_lock);
+		spin_unlock_irq(group_lock);
 	}
 
 	/* Preferred node as the node with the most faults */
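
Note: the two hunks above convert numa_group->lock from plain
spin_lock()/spin_unlock() to the _irq variants. The rationale is not
stated on this page, but the pattern is the standard one: once a
spinlock can be taken in a context that runs with interrupts disabled,
every taker must disable interrupts, or an interrupt arriving while the
lock is held can spin on it forever on the same CPU. The toy program
below is a single-threaded userspace model of that hazard; every name
in it (fake_irq, fake_spin_lock_irq, ...) is invented for illustration
and none of it is kernel code.

    /* toy_irq_lock.c — cc -o toy toy_irq_lock.c && ./toy
     * Simulated CPU state; all names are made up for this sketch. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool lock_held;   /* stands in for numa_group->lock       */
    static bool irqs_off;    /* stands in for local_irq_disable()    */
    static bool irq_pending; /* irq latched while interrupts are off */

    static void fake_irq(void)          /* handler also wants the lock */
    {
        if (irqs_off) { irq_pending = true; return; } /* delivery deferred */
        if (lock_held) {
            puts("deadlock: handler spins on a lock its own CPU holds");
            return;                     /* a real CPU would hang here */
        }
    }

    static void fake_spin_lock(void)     { lock_held = true; }
    static void fake_spin_lock_irq(void) { irqs_off = true; lock_held = true; }

    static void fake_spin_unlock_irq(void)
    {
        lock_held = false;
        irqs_off = false;
        if (irq_pending) { irq_pending = false; fake_irq(); } /* replay irq */
    }

    int main(void)
    {
        fake_spin_lock();       /* old code: interrupts stay enabled... */
        fake_irq();             /* ...so an irq in this window deadlocks */
        lock_held = false;

        fake_spin_lock_irq();   /* new code: irq held off over the section */
        fake_irq();
        fake_spin_unlock_irq(); /* deferred handler runs safely here */
        puts("irq-safe variant: no deadlock");
        return 0;
    }
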
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!join)
 		return;
 
-	double_lock(&my_grp->lock, &grp->lock);
+	BUG_ON(irqs_disabled());
+	double_lock_irq(&my_grp->lock, &grp->lock);
 
 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 		my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	grp->nr_tasks++;
 
 	spin_unlock(&my_grp->lock);
-	spin_unlock(&grp->lock);
+	spin_unlock_irq(&grp->lock);
 
 	rcu_assign_pointer(p->numa_group, grp);
 
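Note: task_numa_group() moves a task's fault stats from its old group
to the one it is joining, so both group locks must be held at once.
Taking two locks of the same class in a fixed order is what prevents
ABBA deadlock here; if I read the kernel's double_lock() helper
correctly, that order is by lock address, and double_lock_irq() is the
same idea with interrupts disabled first. Also worth noticing in the
second hunk: my_grp->lock is dropped with plain spin_unlock() while
grp->lock is dropped with spin_unlock_irq(), so interrupts come back
on only when the last lock is released. The pthreads sketch below
reproduces the ordering idiom in userspace; all names are invented.

    /* double_lock_order.c — cc -pthread -o dlo double_lock_order.c && ./dlo
     * Userspace sketch of address-ordered double locking; names are mine. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { pthread_mutex_t lock; long faults; } group_t;

    static group_t g1 = { PTHREAD_MUTEX_INITIALIZER, 1000000 };
    static group_t g2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

    /* Take both locks, lower address first, so A->B and B->A movers can
     * never each hold one lock and wait on the other (ABBA). The kernel
     * compares the pointers directly; uintptr_t keeps this sketch tidy. */
    static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if ((uintptr_t)a > (uintptr_t)b) {
            pthread_mutex_t *t = a; a = b; b = t;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    }

    static void move_one(group_t *from, group_t *to)
    {
        double_lock(&from->lock, &to->lock);
        from->faults--;
        to->faults++;
        pthread_mutex_unlock(&from->lock);
        pthread_mutex_unlock(&to->lock);
    }

    static void *mover(void *arg)
    {
        for (int i = 0; i < 100000; i++)
            move_one(arg ? &g1 : &g2, arg ? &g2 : &g1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        /* two threads move stats in opposite directions concurrently */
        pthread_create(&t1, NULL, mover, (void *)1);
        pthread_create(&t2, NULL, mover, (void *)0);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("g1=%ld g2=%ld (no deadlock)\n", g1.faults, g2.faults);
        return 0;
    }
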
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
 	void *numa_faults = p->numa_faults_memory;
 
 	if (grp) {
-		spin_lock(&grp->lock);
+		spin_lock_irq(&grp->lock);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock(&grp->lock);
+		spin_unlock_irq(&grp->lock);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
@@ -6652,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
 	int this_cpu = this_rq->cpu;
 
 	idle_enter_fair(this_rq);
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -6704,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
 	raw_spin_lock(&this_rq->lock);
 
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
 	/*
-	 * While browsing the domains, we released the rq lock.
-	 * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task) {
+	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
-		goto out;
-	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
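
Note: this hunk does two things. The max_idle_balance_cost update is
hoisted to run right after the rq lock is retaken, and the early
"goto out" is dropped in favour of falling through, so the cost
bookkeeping and the final high-priority-class check (next hunk) now
run on every path. Under the old layout, a task appearing while the
lock was released skipped the cost update entirely. A minimal
standalone sketch of that before/after difference, with invented
names:

    /* hoist_bookkeeping.c — cc -o hoist hoist_bookkeeping.c && ./hoist
     * Sketch only; names and types are invented, not the scheduler's. */
    #include <stdio.h>

    static unsigned long max_cost_old, max_cost_new;

    static int balance_old(unsigned long curr_cost, int task_appeared)
    {
        if (task_appeared)
            return 1;                  /* "goto out": skips the update below */
        if (curr_cost > max_cost_old)
            max_cost_old = curr_cost;
        return 0;
    }

    static int balance_new(unsigned long curr_cost, int task_appeared)
    {
        if (curr_cost > max_cost_new)  /* hoisted: runs before any early-out */
            max_cost_new = curr_cost;
        return task_appeared ? 1 : 0;  /* fall through instead of jumping */
    }

    int main(void)
    {
        balance_old(100, 1);
        balance_new(100, 1);
        printf("old max=%lu, new max=%lu\n", max_cost_old, max_cost_new);
        return 0;                      /* prints: old max=0, new max=100 */
    }
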
@@ -6721,13 +6725,11 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->next_balance = next_balance;
 	}
 
-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    (this_rq->dl.dl_nr_running ||
+	    ((this_rq->stop && this_rq->stop->on_rq) ||
+	     this_rq->dl.dl_nr_running ||
 	    (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
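
Note: the added stop-class test widens the final check: after
balancing, if any task from a class above CFS (stop, deadline, or
unthrottled RT) became runnable, pulled_task is forced to -1. If I
read this kernel series correctly, the caller treats a negative return
as "redo the task pick from the highest class" rather than going idle,
so the return value is effectively tri-state. A compact sketch of that
convention; the enum and function names below are mine, not the
kernel's:

    /* tri_state_pick.c — cc -o tsp tri_state_pick.c && ./tsp
     * Models only the return convention; all names are invented. */
    #include <stdio.h>

    enum balance_result {
        PULLED_NONE   =  0,  /* nothing found, ok to go idle     */
        PULLED_TASK   =  1,  /* a CFS task was pulled, run it    */
        RETRY_CLASSES = -1,  /* higher class woke up, redo pick  */
    };

    static enum balance_result idle_balance_result(int cfs_running,
                                                   int total_running,
                                                   int higher_class_runnable)
    {
        /* mirrors: nr_running != cfs.h_nr_running && (stop || dl || rt) */
        if (total_running != cfs_running && higher_class_runnable)
            return RETRY_CLASSES;
        return cfs_running ? PULLED_TASK : PULLED_NONE;
    }

    int main(void)
    {
        /* a stop/dl/rt task appeared while we balanced: retry the pick */
        printf("%d\n", idle_balance_result(0, 1, 1));  /* -1 */
        printf("%d\n", idle_balance_result(1, 1, 0));  /*  1 */
        printf("%d\n", idle_balance_result(0, 0, 0));  /*  0 */
        return 0;
    }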