author	Lee Schermerhorn <lee.schermerhorn@hp.com>	2008-04-28 05:13:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:24 -0400
commit	45c4745af381851b0406d8e4db99e62e265691c2 (patch)
tree	d93f6f7b3d7eb3773aaa80444c56baff99e670d6
parent	ae4d8c16aa22775f5731677abb8a82f03cec877e (diff)
mempolicy: rename struct mempolicy 'policy' member to 'mode'
The terms 'policy' and 'mode' are both used in various places to describe
the semantics of the value stored in the 'policy' member of struct
mempolicy.  Furthermore, the term 'policy' is used to refer to that member,
to the entire struct mempolicy and to the more abstract concept of the
tuple consisting of a "mode" and an optional node or set of nodes.

Recently, we have added "mode flags" that are passed in the upper bits of
the 'mode' [or sometimes, 'policy'] member of the numa APIs.

I'd like to resolve this confusion, which perhaps only exists in my mind,
by renaming the 'policy' member to 'mode' throughout, and fixing up the
Documentation.  Man pages will be updated separately.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
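As a minimal illustration of the mode-flags convention the message refers
to, here is a userspace sketch (not part of the patch).  It assumes libnuma
is installed (build with -lnuma) and that numaif.h defines
MPOL_F_STATIC_NODES, one of the flags added in this patch series; the
available flags depend on the kernel version.

/* Sketch: interleave over node 0, with a mode flag in the upper bits. */
#include <numa.h>
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	struct bitmask *nodes = numa_allocate_nodemask();

	numa_bitmask_setbit(nodes, 0);

	/* low bits: the mode; upper bits: optional MPOL_F_* mode flags */
	if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
			  nodes->maskp, nodes->size + 1) != 0)
		perror("set_mempolicy");

	numa_bitmask_free(nodes);
	return 0;
}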
-rw-r--r--  Documentation/vm/numa_memory_policy.txt |  4
-rw-r--r--  include/linux/mempolicy.h               |  6
-rw-r--r--  mm/mempolicy.c                          | 46
3 files changed, 26 insertions(+), 30 deletions(-)
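On the kernel side, the syscall entry points split that combined user
value before mpol_new() stores the pieces into the two struct members the
diff below renames and keeps, respectively.  A simplified sketch of the
split follows; the helper name is illustrative, and the flag values are
assumed to match include/linux/mempolicy.h of this era:

#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)

/* Decompose a user-supplied value into a behavioral mode and its flags. */
static void split_user_mode(unsigned short value,
			    unsigned short *mode, unsigned short *flags)
{
	*flags = value & MPOL_MODE_FLAGS;	/* upper-bit mode flags */
	*mode  = value & ~MPOL_MODE_FLAGS;	/* behavioral mode (MPOL_*) */
}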
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index 1c7dd21623d2..27b9507a3769 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -145,10 +145,6 @@ Components of Memory Policies
 structure, struct mempolicy.  Details of this structure will be discussed
 in context, below, as required to explain the behavior.
 
-Note: in some functions AND in the struct mempolicy itself, the mode
-is called "policy".  However, to avoid confusion with the policy tuple,
-this document will continue to use the term "mode".
-
 Linux memory policy supports the following 4 behavioral modes:
 
 Default Mode--MPOL_DEFAULT:  The behavior specified by this mode is
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5e19c2275a6f..9080fab1426d 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -80,7 +80,7 @@ struct mm_struct;
  */
 struct mempolicy {
 	atomic_t refcnt;
-	unsigned short policy;	/* See MPOL_* above */
+	unsigned short mode;	/* See MPOL_* above */
 	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
 	union {
 		short preferred_node;	/* preferred */
@@ -149,7 +149,7 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
-void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
+void mpol_shared_policy_init(struct shared_policy *info, unsigned short mode,
 				unsigned short flags, nodemask_t *nodes);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
@@ -213,7 +213,7 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 }
 
 static inline void mpol_shared_policy_init(struct shared_policy *info,
-			unsigned short policy, unsigned short flags, nodemask_t *nodes)
+			unsigned short mode, unsigned short flags, nodemask_t *nodes)
 {
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8924aaf4665c..5e7eea2dc8b4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -106,7 +106,7 @@ enum zone_type policy_zone = 0;
 
 struct mempolicy default_policy = {
 	.refcnt = ATOMIC_INIT(1), /* never free it */
-	.policy = MPOL_DEFAULT,
+	.mode = MPOL_DEFAULT,
 };
 
 static const struct mempolicy_operations {
@@ -211,7 +211,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 	if (!policy)
 		return ERR_PTR(-ENOMEM);
 	atomic_set(&policy->refcnt, 1);
-	policy->policy = mode;
+	policy->mode = mode;
 	policy->flags = flags;
 
 	if (nodes) {
@@ -302,7 +302,7 @@ static void mpol_rebind_policy(struct mempolicy *pol,
 	if (!mpol_store_user_nodemask(pol) &&
 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 		return;
-	mpol_ops[pol->policy].rebind(pol, newmask);
+	mpol_ops[pol->mode].rebind(pol, newmask);
 }
 
 /*
@@ -608,7 +608,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	mpol_put(current->mempolicy);
 	current->mempolicy = new;
 	mpol_set_task_struct_flag();
-	if (new && new->policy == MPOL_INTERLEAVE &&
+	if (new && new->mode == MPOL_INTERLEAVE &&
 	    nodes_weight(new->v.nodes))
 		current->il_next = first_node(new->v.nodes);
 	if (mm)
@@ -621,7 +621,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
 {
 	nodes_clear(*nodes);
-	switch (p->policy) {
+	switch (p->mode) {
 	case MPOL_DEFAULT:
 		break;
 	case MPOL_BIND:
@@ -700,14 +700,14 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 				goto out;
 			*policy = err;
 		} else if (pol == current->mempolicy &&
-				pol->policy == MPOL_INTERLEAVE) {
+				pol->mode == MPOL_INTERLEAVE) {
 			*policy = current->il_next;
 		} else {
 			err = -EINVAL;
 			goto out;
 		}
 	} else
-		*policy = pol->policy | pol->flags;
+		*policy = pol->mode | pol->flags;
 
 	if (vma) {
 		up_read(&current->mm->mmap_sem);
@@ -1276,7 +1276,7 @@ static struct mempolicy *get_vma_policy(struct task_struct *task,
 			pol = vpol;
 			shared_pol = 1;	/* if pol non-NULL, add ref below */
 		} else if (vma->vm_policy &&
-				vma->vm_policy->policy != MPOL_DEFAULT)
+				vma->vm_policy->mode != MPOL_DEFAULT)
 			pol = vma->vm_policy;
 	}
 	if (!pol)
@@ -1290,7 +1290,7 @@ static struct mempolicy *get_vma_policy(struct task_struct *task,
 static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
 {
 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
-	if (unlikely(policy->policy == MPOL_BIND) &&
+	if (unlikely(policy->mode == MPOL_BIND) &&
 			gfp_zone(gfp) >= policy_zone &&
 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
 		return &policy->v.nodes;
@@ -1303,7 +1303,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 {
 	int nd;
 
-	switch (policy->policy) {
+	switch (policy->mode) {
 	case MPOL_PREFERRED:
 		nd = policy->v.preferred_node;
 		if (nd < 0)
@@ -1353,7 +1353,7 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-	unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
+	unsigned short pol = policy ? policy->mode : MPOL_DEFAULT;
 
 	switch (pol) {
 	case MPOL_INTERLEAVE:
@@ -1454,9 +1454,9 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 
 	*mpol = NULL;		/* probably no unref needed */
 	*nodemask = NULL;	/* assume !MPOL_BIND */
-	if (pol->policy == MPOL_BIND) {
+	if (pol->mode == MPOL_BIND) {
 		*nodemask = &pol->v.nodes;
-	} else if (pol->policy == MPOL_INTERLEAVE) {
+	} else if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
@@ -1468,7 +1468,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 
 	zl = zonelist_policy(GFP_HIGHUSER, pol);
 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
-		if (pol->policy != MPOL_BIND)
+		if (pol->mode != MPOL_BIND)
 			__mpol_put(pol);	/* finished with pol */
 		else
 			*mpol = pol;	/* unref needed after allocation */
@@ -1522,7 +1522,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 
 	cpuset_update_task_memory_state();
 
-	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
+	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
@@ -1574,7 +1574,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	cpuset_update_task_memory_state();
 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
-	if (pol->policy == MPOL_INTERLEAVE)
+	if (pol->mode == MPOL_INTERLEAVE)
 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	return __alloc_pages_nodemask(gfp, order,
 			zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
@@ -1620,11 +1620,11 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	if (!a || !b)
 		return 0;
-	if (a->policy != b->policy)
+	if (a->mode != b->mode)
 		return 0;
-	if (a->policy != MPOL_DEFAULT && !mpol_match_intent(a, b))
+	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
 		return 0;
-	switch (a->policy) {
+	switch (a->mode) {
 	case MPOL_DEFAULT:
 		return 1;
 	case MPOL_BIND:
@@ -1644,7 +1644,7 @@ void __mpol_put(struct mempolicy *p)
 {
 	if (!atomic_dec_and_test(&p->refcnt))
 		return;
-	p->policy = MPOL_DEFAULT;
+	p->mode = MPOL_DEFAULT;
 	kmem_cache_free(policy_cache, p);
 }
 
@@ -1710,7 +1710,7 @@ static void sp_insert(struct shared_policy *sp, struct sp_node *new)
 	rb_link_node(&new->nd, parent, p);
 	rb_insert_color(&new->nd, &sp->root);
 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
-		 new->policy ? new->policy->policy : 0);
+		 new->policy ? new->policy->mode : 0);
 }
 
 /* Find shared policy intersecting idx */
@@ -1835,7 +1835,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
 
 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
 		 vma->vm_pgoff,
-		 sz, npol ? npol->policy : -1,
+		 sz, npol ? npol->mode : -1,
 		 npol ? npol->flags : -1,
 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
 
@@ -1935,7 +1935,7 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	char *p = buffer;
 	int l;
 	nodemask_t nodes;
-	unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
+	unsigned short mode = pol ? pol->mode : MPOL_DEFAULT;
 	unsigned short flags = pol ? pol->flags : 0;
 
 	switch (mode) {