author	David Rientjes <rientjes@google.com>	2008-04-28 05:12:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:20 -0400
commit	1d0d2680a01c4f9e292ec6d4714884da939053a1 (patch)
tree	1377ed40ec15ffecc584b308a671be47b5145db3 /mm/mempolicy.c
parent	65d66fc02ed9433b957588071b60425b12628e25 (diff)
mempolicy: move rebind functions
Move the mpol_rebind_{policy,task,mm}() functions after mpol_new() to
avoid having to declare function prototypes.

Cc: Paul Jackson <pj@sgi.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
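The change is purely code motion: in C, a static function can be called
without a separate prototype only if its definition appears before the
first call in the translation unit, so moving the mpol_rebind_*()
definitions ahead of their first in-file users lets the forward
declaration at the top of mempolicy.c be dropped. A minimal standalone
sketch of the two layouts (hypothetical names rebind_a/rebind_b, not
kernel code):

#include <stdio.h>

/* Layout A ("before"): the callee is defined after its caller, so a
 * forward declaration is unavoidable. */
static void rebind_a(int node);			/* prototype required */

static void caller_a(void)
{
	rebind_a(0);
}

static void rebind_a(int node)
{
	printf("rebind A: node %d\n", node);
}

/* Layout B ("after"): defining the callee before its caller makes the
 * prototype unnecessary. */
static void rebind_b(int node)
{
	printf("rebind B: node %d\n", node);
}

static void caller_b(void)
{
	rebind_b(0);
}

int main(void)
{
	caller_a();
	caller_b();
	return 0;
}

Layout B is the shape this patch gives mempolicy.c.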
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	185
1 file changed, 91 insertions(+), 94 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ffd3be66b255..d44c524e5ae4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -110,9 +110,6 @@ struct mempolicy default_policy = {
 	.policy = MPOL_DEFAULT,
 };
 
-static void mpol_rebind_policy(struct mempolicy *pol,
-			       const nodemask_t *newmask);
-
 /* Check that the nodemask contains at least one populated zone */
 static int is_valid_nodemask(nodemask_t *nodemask)
 {
@@ -203,6 +200,97 @@ free:
 	return ERR_PTR(-EINVAL);
 }
 
+/* Migrate a policy to a different set of nodes */
+static void mpol_rebind_policy(struct mempolicy *pol,
+			       const nodemask_t *newmask)
+{
+	nodemask_t tmp;
+	int static_nodes;
+	int relative_nodes;
+
+	if (!pol)
+		return;
+	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
+	relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
+	if (!mpol_store_user_nodemask(pol) &&
+	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+		return;
+
+	switch (pol->policy) {
+	case MPOL_DEFAULT:
+		break;
+	case MPOL_BIND:
+		/* Fall through */
+	case MPOL_INTERLEAVE:
+		if (static_nodes)
+			nodes_and(tmp, pol->w.user_nodemask, *newmask);
+		else if (relative_nodes)
+			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+					       newmask);
+		else {
+			nodes_remap(tmp, pol->v.nodes,
+				    pol->w.cpuset_mems_allowed, *newmask);
+			pol->w.cpuset_mems_allowed = *newmask;
+		}
+		pol->v.nodes = tmp;
+		if (!node_isset(current->il_next, tmp)) {
+			current->il_next = next_node(current->il_next, tmp);
+			if (current->il_next >= MAX_NUMNODES)
+				current->il_next = first_node(tmp);
+			if (current->il_next >= MAX_NUMNODES)
+				current->il_next = numa_node_id();
+		}
+		break;
+	case MPOL_PREFERRED:
+		if (static_nodes) {
+			int node = first_node(pol->w.user_nodemask);
+
+			if (node_isset(node, *newmask))
+				pol->v.preferred_node = node;
+			else
+				pol->v.preferred_node = -1;
+		} else if (relative_nodes) {
+			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+					       newmask);
+			pol->v.preferred_node = first_node(tmp);
+		} else {
+			pol->v.preferred_node = node_remap(pol->v.preferred_node,
+					pol->w.cpuset_mems_allowed, *newmask);
+			pol->w.cpuset_mems_allowed = *newmask;
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/*
+ * Wrapper for mpol_rebind_policy() that just requires task
+ * pointer, and updates task mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+	mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
+ *
+ * Call holding a reference to mm.  Takes mm->mmap_sem during call.
+ */
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+	struct vm_area_struct *vma;
+
+	down_write(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		mpol_rebind_policy(vma->vm_policy, new);
+	up_write(&mm->mmap_sem);
+}
+
 static void gather_stats(struct page *, void *, int pte_dirty);
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
@@ -1757,97 +1845,6 @@ void numa_default_policy(void)
 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
 }
 
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
-			       const nodemask_t *newmask)
-{
-	nodemask_t tmp;
-	int static_nodes;
-	int relative_nodes;
-
-	if (!pol)
-		return;
-	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
-	relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
-	if (!mpol_store_user_nodemask(pol) &&
-	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
-		return;
-
-	switch (pol->policy) {
-	case MPOL_DEFAULT:
-		break;
-	case MPOL_BIND:
-		/* Fall through */
-	case MPOL_INTERLEAVE:
-		if (static_nodes)
-			nodes_and(tmp, pol->w.user_nodemask, *newmask);
-		else if (relative_nodes)
-			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-					       newmask);
-		else {
-			nodes_remap(tmp, pol->v.nodes,
-				    pol->w.cpuset_mems_allowed, *newmask);
-			pol->w.cpuset_mems_allowed = *newmask;
-		}
-		pol->v.nodes = tmp;
-		if (!node_isset(current->il_next, tmp)) {
-			current->il_next = next_node(current->il_next, tmp);
-			if (current->il_next >= MAX_NUMNODES)
-				current->il_next = first_node(tmp);
-			if (current->il_next >= MAX_NUMNODES)
-				current->il_next = numa_node_id();
-		}
-		break;
-	case MPOL_PREFERRED:
-		if (static_nodes) {
-			int node = first_node(pol->w.user_nodemask);
-
-			if (node_isset(node, *newmask))
-				pol->v.preferred_node = node;
-			else
-				pol->v.preferred_node = -1;
-		} else if (relative_nodes) {
-			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-					       newmask);
-			pol->v.preferred_node = first_node(tmp);
-		} else {
-			pol->v.preferred_node = node_remap(pol->v.preferred_node,
-					pol->w.cpuset_mems_allowed, *newmask);
-			pol->w.cpuset_mems_allowed = *newmask;
-		}
-		break;
-	default:
-		BUG();
-		break;
-	}
-}
-
-/*
- * Wrapper for mpol_rebind_policy() that just requires task
- * pointer, and updates task mempolicy.
- */
-
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
-{
-	mpol_rebind_policy(tsk->mempolicy, new);
-}
-
-/*
- * Rebind each vma in mm to new nodemask.
- *
- * Call holding a reference to mm.  Takes mm->mmap_sem during call.
- */
-
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-{
-	struct vm_area_struct *vma;
-
-	down_write(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		mpol_rebind_policy(vma->vm_policy, new);
-	up_write(&mm->mmap_sem);
-}
-
 /*
  * Display pages allocated per node and memory policy via /proc.
  */
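For readers tracing the rebind logic itself rather than the code motion:
with MPOL_F_STATIC_NODES the user's nodemask is intersected with the new
cpuset mask; with MPOL_F_RELATIVE_NODES it is mapped onto the new mask
as relative offsets; in the default case nodes_remap()/node_remap() move
each node to the same relative position in the new mask. A hedged
userspace sketch of that last rule, using a plain unsigned int in place
of nodemask_t (an illustration in the spirit of the kernel's
bitmap_bitremap(), not the kernel implementation):

#include <stdio.h>

/* Ordinal of 'bit' within 'mask': how many set bits lie below it.
 * Returns -1 if 'bit' is not set in 'mask'. */
static int bit_ord(unsigned int mask, int bit)
{
	if (!(mask & (1u << bit)))
		return -1;
	return __builtin_popcount(mask & ((1u << bit) - 1));
}

/* Position of the n-th (0-based) set bit of 'mask', or -1. */
static int ord_bit(unsigned int mask, int n)
{
	int pos;

	for (pos = 0; pos < 32; pos++)
		if ((mask & (1u << pos)) && n-- == 0)
			return pos;
	return -1;
}

/* Map 'oldbit' from mask 'old' onto mask 'new', preserving relative
 * position: the n-th set bit of 'old' becomes the
 * (n mod weight(new))-th set bit of 'new'; otherwise fall back to the
 * identity map. */
static int remap_bit(int oldbit, unsigned int old, unsigned int new)
{
	int w = __builtin_popcount(new);
	int n = bit_ord(old, oldbit);

	if (n < 0 || w == 0)
		return oldbit;
	return ord_bit(new, n % w);
}

int main(void)
{
	/* Node 2 is the second set bit of {1,2} (0x6); it remaps to
	 * the second set bit of {4,5} (0x30), i.e. node 5. */
	printf("%d\n", remap_bit(2, 0x6, 0x30));	/* prints 5 */
	return 0;
}

So a policy over nodes {1,2} in a cpuset whose mems_allowed changes to
{4,5} ends up over {4,5}: the relative order of the nodes is preserved
rather than their absolute numbers.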