diff options
Diffstat (limited to 'include/linux/cpuset.h')
-rw-r--r-- | include/linux/cpuset.h | 47 |
1 files changed, 20 insertions, 27 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index e9eaec522655..7a7e5fd2a277 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void); | |||
89 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | 89 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * reading current mems_allowed and mempolicy in the fastpath must protected | 92 | * get_mems_allowed is required when making decisions involving mems_allowed |
93 | * by get_mems_allowed() | 93 | * such as during page allocation. mems_allowed can be updated in parallel |
94 | * and depending on the new value an operation can fail potentially causing | ||
95 | * process failure. A retry loop with get_mems_allowed and put_mems_allowed | ||
96 | * prevents these artificial failures. | ||
94 | */ | 97 | */ |
95 | static inline void get_mems_allowed(void) | 98 | static inline unsigned int get_mems_allowed(void) |
96 | { | 99 | { |
97 | current->mems_allowed_change_disable++; | 100 | return read_seqcount_begin(&current->mems_allowed_seq); |
98 | |||
99 | /* | ||
100 | * ensure that reading mems_allowed and mempolicy happens after the | ||
101 | * update of ->mems_allowed_change_disable. | ||
102 | * | ||
103 | * the write-side task finds ->mems_allowed_change_disable is not 0, | ||
104 | * and knows the read-side task is reading mems_allowed or mempolicy, | ||
105 | * so it will clear old bits lazily. | ||
106 | */ | ||
107 | smp_mb(); | ||
108 | } | 101 | } |
109 | 102 | ||
110 | static inline void put_mems_allowed(void) | 103 | /* |
104 | * If this returns false, the operation that took place after get_mems_allowed | ||
105 | * may have failed. It is up to the caller to retry the operation if | ||
106 | * appropriate. | ||
107 | */ | ||
108 | static inline bool put_mems_allowed(unsigned int seq) | ||
111 | { | 109 | { |
112 | /* | 110 | return !read_seqcount_retry(&current->mems_allowed_seq, seq); |
113 | * ensure that reading mems_allowed and mempolicy before reducing | ||
114 | * mems_allowed_change_disable. | ||
115 | * | ||
116 | * the write-side task will know that the read-side task is still | ||
117 | * reading mems_allowed or mempolicy, don't clears old bits in the | ||
118 | * nodemask. | ||
119 | */ | ||
120 | smp_mb(); | ||
121 | --ACCESS_ONCE(current->mems_allowed_change_disable); | ||
122 | } | 111 | } |
123 | 112 | ||
124 | static inline void set_mems_allowed(nodemask_t nodemask) | 113 | static inline void set_mems_allowed(nodemask_t nodemask) |
125 | { | 114 | { |
126 | task_lock(current); | 115 | task_lock(current); |
116 | write_seqcount_begin(&current->mems_allowed_seq); | ||
127 | current->mems_allowed = nodemask; | 117 | current->mems_allowed = nodemask; |
118 | write_seqcount_end(&current->mems_allowed_seq); | ||
128 | task_unlock(current); | 119 | task_unlock(current); |
129 | } | 120 | } |
130 | 121 | ||
@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask) | |||
234 | { | 225 | { |
235 | } | 226 | } |
236 | 227 | ||
237 | static inline void get_mems_allowed(void) | 228 | static inline unsigned int get_mems_allowed(void) |
238 | { | 229 | { |
230 | return 0; | ||
239 | } | 231 | } |
240 | 232 | ||
241 | static inline void put_mems_allowed(void) | 233 | static inline bool put_mems_allowed(unsigned int seq) |
242 | { | 234 | { |
235 | return true; | ||
243 | } | 236 | } |
244 | 237 | ||
245 | #endif /* !CONFIG_CPUSETS */ | 238 | #endif /* !CONFIG_CPUSETS */ |