-rw-r--r--	kernel/cpuset.c	41
1 file changed, 15 insertions, 26 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 918bee9dc7a2..6868c1e78917 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -752,7 +752,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	trialcs = *cs;
 
 	/*
-	 * An empty cpus_allowed is ok if there are no tasks in the cpuset.
+	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing. The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
@@ -809,7 +809,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
  * so that the migration code can allocate pages on these nodes.
  *
  * Call holding cgroup_mutex, so current's cpuset won't change
- * during this call, as cgroup_mutex holds off any attach_task()
+ * during this call, as manage_mutex holds off any cpuset_attach()
  * calls. Therefore we don't need to take task_lock around the
  * call to guarantee_online_mems(), as we know no one is changing
  * our task's cpuset.
@@ -1661,8 +1661,8 @@ void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
  * @from: cpuset in which the tasks currently reside
  * @to: cpuset to which the tasks will be moved
  *
- * Called with manage_sem held
- * callback_mutex must not be held, as attach_task() will take it.
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
@@ -1689,18 +1689,18 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
  * last CPU or node from a cpuset, then move the tasks in the empty
  * cpuset to its next-highest non-empty parent.
  *
- * The parent cpuset has some superset of the 'mems' nodes that the
- * newly empty cpuset held, so no migration of memory is necessary.
- *
- * Called with both manage_sem and callback_sem held
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
  */
 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 {
 	struct cpuset *parent;
 
-	/* the cgroup's css_sets list is in use if there are tasks
-	   in the cpuset; the list is empty if there are none;
-	   the cs->css.refcnt seems always 0 */
+	/*
+	 * The cgroup's css_sets list is in use if there are tasks
+	 * in the cpuset; the list is empty if there are none;
+	 * the cs->css.refcnt seems always 0.
+	 */
 	if (list_empty(&cs->css.cgroup->css_sets))
 		return;
 
@@ -1709,14 +1709,8 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 	 * has online cpus, so can't be empty).
 	 */
 	parent = cs->parent;
-	while (cpus_empty(parent->cpus_allowed)) {
-		/*
-		 * this empty cpuset should now be considered to
-		 * have been used, and therefore eligible for
-		 * release when empty (if it is notify_on_release)
-		 */
+	while (cpus_empty(parent->cpus_allowed))
 		parent = parent->parent;
-	}
 
 	move_member_tasks_to_cpuset(cs, parent);
 }
@@ -1725,10 +1719,6 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  * Walk the specified cpuset subtree and look for empty cpusets.
  * The tasks of such cpuset must be moved to a parent cpuset.
  *
- * Note that such a notify_on_release cpuset must have had, at some time,
- * member tasks or cpuset descendants and cpus and memory, before it can
- * be a candidate for release.
- *
  * Called with cgroup_mutex held. We take callback_mutex to modify
  * cpus_allowed and mems_allowed.
  *
@@ -1764,8 +1754,8 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 		cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
-		if ((cpus_empty(cp->cpus_allowed) ||
-					nodes_empty(cp->mems_allowed))) {
+		if (cpus_empty(cp->cpus_allowed) ||
+					nodes_empty(cp->mems_allowed)) {
 			/* Move tasks from the empty cpuset to a parent */
 			mutex_unlock(&callback_mutex);
 			remove_tasks_in_empty_cpuset(cp);
@@ -1773,7 +1763,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 		}
 	}
 	mutex_unlock(&callback_mutex);
-	return;
 }
 
 /*
@@ -2207,7 +2196,7 @@ void __cpuset_memory_pressure_bump(void)
  * - Used for /proc/<pid>/cpuset.
  * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *   doesn't really matter if tsk->cpuset changes after we read it,
- *   and we take cgroup_mutex, keeping attach_task() from changing it
+ *   and we take cgroup_mutex, keeping cpuset_attach() from changing it
  *   anyway.
  */
 static int proc_cpuset_show(struct seq_file *m, void *unused_v)