author     Andi Kleen <ak@suse.de>                  2005-10-29 21:15:48 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:35 -0400
commit     dfcd3c0dc426bb75770c34b40e14f2da8845ea62
tree       bd7e9f8463a18025c4775c6cdf22abbbd4236b64  /mm/mempolicy.c
parent     e46a5e28c201f703c18b47b108bfddec44f897c4
[PATCH] Convert mempolicies to nodemask_t
The NUMA policy code predated nodemask_t, so it used open-coded bitmaps. Convert
everything to nodemask_t. Big patch, but it shouldn't have any actual behaviour
changes (except that I removed one unnecessary check against node_online_map and
one unnecessary BUG_ON).

Signed-off-by: "Andi Kleen" <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
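For reference, the helpers the patch converts to are thin wrappers over the same bitmap operations. The following is a simplified sketch paraphrased from include/linux/nodemask.h; it is not the exact header text and not part of this commit:

typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;

#define nodes_addr(src)		((src).bits)
#define nodes_clear(dst)	bitmap_zero((dst).bits, MAX_NUMNODES)
#define nodes_empty(mask)	bitmap_empty((mask).bits, MAX_NUMNODES)
#define nodes_weight(mask)	bitmap_weight((mask).bits, MAX_NUMNODES)
#define nodes_equal(a, b)	bitmap_equal((a).bits, (b).bits, MAX_NUMNODES)
#define nodes_subset(a, b)	bitmap_subset((a).bits, (b).bits, MAX_NUMNODES)
#define node_set(node, dst)	set_bit((node), (dst).bits)
#define node_isset(node, mask)	test_bit((node), (mask).bits)
#define first_node(mask)	find_first_bit((mask).bits, MAX_NUMNODES)
#define next_node(n, mask)	find_next_bit((mask).bits, MAX_NUMNODES, (n) + 1)
#define for_each_node_mask(node, mask)			\
	for ((node) = first_node(mask);			\
	     (node) < MAX_NUMNODES;			\
	     (node) = next_node((node), (mask)))

With these in place, each DECLARE_BITMAP(..., MAX_NUMNODES) local becomes a nodemask_t, each bitmap_*()/test_bit()/set_bit() call site maps onto the matching nodes_*()/node_*() helper, and the open-coded find_first_bit()/find_next_bit() loops collapse into for_each_node_mask(), which is what the diff below does mechanically.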
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c | 120
1 file changed, 53 insertions(+), 67 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1d5c64df1653..8bc0be1c9efd 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -93,23 +93,10 @@ struct mempolicy default_policy = {
 	.policy = MPOL_DEFAULT,
 };
 
-/* Check if all specified nodes are online */
-static int nodes_online(unsigned long *nodes)
-{
-	DECLARE_BITMAP(online2, MAX_NUMNODES);
-
-	bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
-	if (bitmap_empty(online2, MAX_NUMNODES))
-		set_bit(0, online2);
-	if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
-		return -EINVAL;
-	return 0;
-}
-
 /* Do sanity checking on a policy */
-static int mpol_check_policy(int mode, unsigned long *nodes)
+static int mpol_check_policy(int mode, nodemask_t *nodes)
 {
-	int empty = bitmap_empty(nodes, MAX_NUMNODES);
+	int empty = nodes_empty(*nodes);
 
 	switch (mode) {
 	case MPOL_DEFAULT:
@@ -124,11 +111,11 @@ static int mpol_check_policy(int mode, unsigned long *nodes)
 			return -EINVAL;
 		break;
 	}
-	return nodes_online(nodes);
+	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
 }
 
 /* Copy a node mask from user space. */
-static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
+static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
 		     unsigned long maxnode, int mode)
 {
 	unsigned long k;
@@ -136,7 +123,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 	unsigned long endmask;
 
 	--maxnode;
-	bitmap_zero(nodes, MAX_NUMNODES);
+	nodes_clear(*nodes);
 	if (maxnode == 0 || !nmask)
 		return 0;
 
@@ -153,7 +140,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 			return -EINVAL;
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
 			unsigned long t;
-			if (get_user(t, nmask + k))
+			if (get_user(t, nmask + k))
 				return -EFAULT;
 			if (k == nlongs - 1) {
 				if (t & endmask)
@@ -165,30 +152,29 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 		endmask = ~0UL;
 	}
 
-	if (copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
+	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
-	nodes[nlongs-1] &= endmask;
+	nodes_addr(*nodes)[nlongs-1] &= endmask;
 	/* Update current mems_allowed */
 	cpuset_update_current_mems_allowed();
 	/* Ignore nodes not set in current->mems_allowed */
-	cpuset_restrict_to_mems_allowed(nodes);
+	/* AK: shouldn't this error out instead? */
+	cpuset_restrict_to_mems_allowed(nodes_addr(*nodes));
 	return mpol_check_policy(mode, nodes);
 }
 
 /* Generate a custom zonelist for the BIND policy. */
-static struct zonelist *bind_zonelist(unsigned long *nodes)
+static struct zonelist *bind_zonelist(nodemask_t *nodes)
 {
 	struct zonelist *zl;
 	int num, max, nd;
 
-	max = 1 + MAX_NR_ZONES * bitmap_weight(nodes, MAX_NUMNODES);
+	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
 	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
 	if (!zl)
 		return NULL;
 	num = 0;
-	for (nd = find_first_bit(nodes, MAX_NUMNODES);
-	     nd < MAX_NUMNODES;
-	     nd = find_next_bit(nodes, MAX_NUMNODES, 1+nd)) {
+	for_each_node_mask(nd, *nodes) {
 		int k;
 		for (k = MAX_NR_ZONES-1; k >= 0; k--) {
 			struct zone *z = &NODE_DATA(nd)->node_zones[k];
@@ -205,11 +191,11 @@ static struct zonelist *bind_zonelist(unsigned long *nodes)
 }
 
 /* Create a new policy */
-static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
+static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 {
 	struct mempolicy *policy;
 
-	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes[0]);
+	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
 	if (mode == MPOL_DEFAULT)
 		return NULL;
 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -218,10 +204,10 @@ static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
 	atomic_set(&policy->refcnt, 1);
 	switch (mode) {
 	case MPOL_INTERLEAVE:
-		bitmap_copy(policy->v.nodes, nodes, MAX_NUMNODES);
+		policy->v.nodes = *nodes;
 		break;
 	case MPOL_PREFERRED:
-		policy->v.preferred_node = find_first_bit(nodes, MAX_NUMNODES);
+		policy->v.preferred_node = first_node(*nodes);
 		if (policy->v.preferred_node >= MAX_NUMNODES)
 			policy->v.preferred_node = -1;
 		break;
@@ -239,7 +225,7 @@ static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
 
 /* Ensure all existing pages follow the policy. */
 static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pte_t *orig_pte;
 	pte_t *pte;
@@ -256,7 +242,7 @@ static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		if (!pfn_valid(pfn))
 			continue;
 		nid = pfn_to_nid(pfn);
-		if (!test_bit(nid, nodes))
+		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(orig_pte);
@@ -265,7 +251,7 @@ static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
 }
 
 static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -282,7 +268,7 @@ static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
 }
 
 static inline int check_pud_range(struct mm_struct *mm, pgd_t *pgd,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -299,7 +285,7 @@ static inline int check_pud_range(struct mm_struct *mm, pgd_t *pgd,
 }
 
 static inline int check_pgd_range(struct mm_struct *mm,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -318,7 +304,7 @@ static inline int check_pgd_range(struct mm_struct *mm,
 /* Step 1: check the range */
 static struct vm_area_struct *
 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
-	    unsigned long *nodes, unsigned long flags)
+	    nodemask_t *nodes, unsigned long flags)
 {
 	int err;
 	struct vm_area_struct *first, *vma, *prev;
@@ -403,7 +389,7 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
-	DECLARE_BITMAP(nodes, MAX_NUMNODES);
+	nodemask_t nodes;
 	int err;
 
 	if ((flags & ~(unsigned long)(MPOL_MF_STRICT)) || mode > MPOL_MAX)
@@ -419,19 +405,19 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	if (end == start)
 		return 0;
 
-	err = get_nodes(nodes, nmask, maxnode, mode);
+	err = get_nodes(&nodes, nmask, maxnode, mode);
 	if (err)
 		return err;
 
-	new = mpol_new(mode, nodes);
+	new = mpol_new(mode, &nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
 	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
-		 mode,nodes[0]);
+		 mode,nodes_addr(nodes)[0]);
 
 	down_write(&mm->mmap_sem);
-	vma = check_range(mm, start, end, nodes, flags);
+	vma = check_range(mm, start, end, &nodes, flags);
 	err = PTR_ERR(vma);
 	if (!IS_ERR(vma))
 		err = mbind_range(vma, start, end, new);
@@ -446,45 +432,45 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
 {
 	int err;
 	struct mempolicy *new;
-	DECLARE_BITMAP(nodes, MAX_NUMNODES);
+	nodemask_t nodes;
 
 	if (mode < 0 || mode > MPOL_MAX)
 		return -EINVAL;
-	err = get_nodes(nodes, nmask, maxnode, mode);
+	err = get_nodes(&nodes, nmask, maxnode, mode);
 	if (err)
 		return err;
-	new = mpol_new(mode, nodes);
+	new = mpol_new(mode, &nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 	mpol_free(current->mempolicy);
 	current->mempolicy = new;
 	if (new && new->policy == MPOL_INTERLEAVE)
-		current->il_next = find_first_bit(new->v.nodes, MAX_NUMNODES);
+		current->il_next = first_node(new->v.nodes);
 	return 0;
 }
 
 /* Fill a zone bitmap for a policy */
-static void get_zonemask(struct mempolicy *p, unsigned long *nodes)
+static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
 {
 	int i;
 
-	bitmap_zero(nodes, MAX_NUMNODES);
+	nodes_clear(*nodes);
 	switch (p->policy) {
 	case MPOL_BIND:
 		for (i = 0; p->v.zonelist->zones[i]; i++)
-			__set_bit(p->v.zonelist->zones[i]->zone_pgdat->node_id, nodes);
+			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id, *nodes);
 		break;
 	case MPOL_DEFAULT:
 		break;
 	case MPOL_INTERLEAVE:
-		bitmap_copy(nodes, p->v.nodes, MAX_NUMNODES);
+		*nodes = p->v.nodes;
 		break;
 	case MPOL_PREFERRED:
 		/* or use current node instead of online map? */
 		if (p->v.preferred_node < 0)
-			bitmap_copy(nodes, nodes_addr(node_online_map), MAX_NUMNODES);
+			*nodes = node_online_map;
 		else
-			__set_bit(p->v.preferred_node, nodes);
+			node_set(p->v.preferred_node, *nodes);
 		break;
 	default:
 		BUG();
@@ -506,9 +492,10 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
 
 /* Copy a kernel node mask to user space */
 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
-			      void *nodes, unsigned nbytes)
+			      nodemask_t *nodes)
 {
 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
+	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
 
 	if (copy > nbytes) {
 		if (copy > PAGE_SIZE)
@@ -517,7 +504,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 			return -EFAULT;
 		copy = nbytes;
 	}
-	return copy_to_user(mask, nodes, copy) ? -EFAULT : 0;
+	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
 }
 
 /* Retrieve NUMA policy */
@@ -578,9 +565,9 @@ asmlinkage long sys_get_mempolicy(int __user *policy,
 
 	err = 0;
 	if (nmask) {
-		DECLARE_BITMAP(nodes, MAX_NUMNODES);
-		get_zonemask(pol, nodes);
-		err = copy_nodes_to_user(nmask, maxnode, nodes, sizeof(nodes));
+		nodemask_t nodes;
+		get_zonemask(pol, &nodes);
+		err = copy_nodes_to_user(nmask, maxnode, &nodes);
 	}
 
  out:
@@ -649,15 +636,15 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
-	DECLARE_BITMAP(bm, MAX_NUMNODES);
+	nodemask_t bm;
 
 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(bm, nmask, nr_bits);
+		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, bm, alloc_size);
+		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
 	}
 
 	if (err)
@@ -723,9 +710,9 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 
 	nid = me->il_next;
 	BUG_ON(nid >= MAX_NUMNODES);
-	next = find_next_bit(policy->v.nodes, MAX_NUMNODES, 1+nid);
+	next = next_node(nid, policy->v.nodes);
 	if (next >= MAX_NUMNODES)
-		next = find_first_bit(policy->v.nodes, MAX_NUMNODES);
+		next = first_node(policy->v.nodes);
 	me->il_next = next;
 	return nid;
 }
@@ -734,18 +721,17 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 static unsigned offset_il_node(struct mempolicy *pol,
 		struct vm_area_struct *vma, unsigned long off)
 {
-	unsigned nnodes = bitmap_weight(pol->v.nodes, MAX_NUMNODES);
+	unsigned nnodes = nodes_weight(pol->v.nodes);
 	unsigned target = (unsigned)off % nnodes;
 	int c;
 	int nid = -1;
 
 	c = 0;
 	do {
-		nid = find_next_bit(pol->v.nodes, MAX_NUMNODES, nid+1);
+		nid = next_node(nid, pol->v.nodes);
 		c++;
 	} while (c <= target);
 	BUG_ON(nid >= MAX_NUMNODES);
-	BUG_ON(!test_bit(nid, pol->v.nodes));
 	return nid;
 }
 
@@ -878,7 +864,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	case MPOL_DEFAULT:
 		return 1;
 	case MPOL_INTERLEAVE:
-		return bitmap_equal(a->v.nodes, b->v.nodes, MAX_NUMNODES);
+		return nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
 		return a->v.preferred_node == b->v.preferred_node;
 	case MPOL_BIND: {
@@ -1117,7 +1103,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
 	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
 		 vma->vm_pgoff,
 		 sz, npol? npol->policy : -1,
-		 npol ? npol->v.nodes[0] : -1);
+		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
 
 	if (npol) {
 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);