aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2014-04-03 17:47:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-03 19:20:58 -0400
commitd26914d11751b23ca2e8747725f2cae10c2f2c1b (patch)
tree020b606fb9223e29292f54922a11111239e3a3f4 /include
parent91ca9186484809c57303b33778d841cc28f696ed (diff)
mm: optimize put_mems_allowed() usage
Since put_mems_allowed() is strictly optional, its a seqcount retry, we don't need to evaluate the function if the allocation was in fact successful, saving a smp_rmb some loads and comparisons on some relative fast-paths. Since the naming, get/put_mems_allowed() does suggest a mandatory pairing, rename the interface, as suggested by Mel, to resemble the seqcount interface. This gives us: read_mems_allowed_begin() and read_mems_allowed_retry(), where it is important to note that the return value of the latter call is inverted from its previous incarnation. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Mel Gorman <mgorman@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/cpuset.h27
1 file changed, 14 insertions, 13 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 3fe661fe96d1..b19d3dc2e651 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -87,25 +87,26 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * get_mems_allowed is required when making decisions involving mems_allowed
- * such as during page allocation. mems_allowed can be updated in parallel
- * and depending on the new value an operation can fail potentially causing
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
- * prevents these artificial failures.
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
+ * parallel and depending on the new value an operation can fail potentially
+ * causing process failure. A retry loop with read_mems_allowed_begin and
+ * read_mems_allowed_retry prevents these artificial failures.
  */
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
 	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
 /*
- * If this returns false, the operation that took place after get_mems_allowed
- * may have failed. It is up to the caller to retry the operation if
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
  * appropriate.
  */
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+	return read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
@@ -225,14 +226,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
 	return 0;
 }
 
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	return true;
+	return false;
 }
 
 #endif /* !CONFIG_CPUSETS */