Diffstat (limited to 'include/linux/cpuset.h')
-rw-r--r--  include/linux/cpuset.h | 53 ++++++++++++++++++++++-------------------------------
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e9eaec522655..668f66baac7b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * reading current mems_allowed and mempolicy in the fastpath must protected
- * by get_mems_allowed()
+ * get_mems_allowed is required when making decisions involving mems_allowed
+ * such as during page allocation. mems_allowed can be updated in parallel
+ * and depending on the new value an operation can fail potentially causing
+ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+ * prevents these artificial failures.
  */
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
-	current->mems_allowed_change_disable++;
-
-	/*
-	 * ensure that reading mems_allowed and mempolicy happens after the
-	 * update of ->mems_allowed_change_disable.
-	 *
-	 * the write-side task finds ->mems_allowed_change_disable is not 0,
-	 * and knows the read-side task is reading mems_allowed or mempolicy,
-	 * so it will clear old bits lazily.
-	 */
-	smp_mb();
+	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
-static inline void put_mems_allowed(void)
+/*
+ * If this returns false, the operation that took place after get_mems_allowed
+ * may have failed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool put_mems_allowed(unsigned int seq)
 {
-	/*
-	 * ensure that reading mems_allowed and mempolicy before reducing
-	 * mems_allowed_change_disable.
-	 *
-	 * the write-side task will know that the read-side task is still
-	 * reading mems_allowed or mempolicy, don't clears old bits in the
-	 * nodemask.
-	 */
-	smp_mb();
-	--ACCESS_ONCE(current->mems_allowed_change_disable);
+	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
 	task_lock(current);
+	write_seqcount_begin(&current->mems_allowed_seq);
 	current->mems_allowed = nodemask;
+	write_seqcount_end(&current->mems_allowed_seq);
 	task_unlock(current);
 }
 
@@ -144,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, cpu_possible_mask);
 }
 
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	do_set_cpus_allowed(p, cpu_possible_mask);
-	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -234,12 +223,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
+	return 0;
 }
 
-static inline void put_mems_allowed(void)
+static inline bool put_mems_allowed(unsigned int seq)
 {
+	return true;
 }
 
 #endif /* !CONFIG_CPUSETS */
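The new header comment describes a read-side retry loop, but the loop itself lives in the callers. As a rough sketch of the intended usage (alloc_pages_from_allowed(), gfp_mask and order below are hypothetical stand-ins, not part of this patch; only get_mems_allowed(), put_mems_allowed() and the cookie-based retry come from the interface above):

	struct page *page;
	unsigned int cpuset_mems_cookie;

	do {
		/* Snapshot mems_allowed_seq before reading mems_allowed. */
		cpuset_mems_cookie = get_mems_allowed();

		/*
		 * Make the allocation decision against the snapshotted
		 * nodemask (illustrative helper, not part of this patch).
		 */
		page = alloc_pages_from_allowed(gfp_mask, order);

		/*
		 * put_mems_allowed() returns false if mems_allowed was
		 * updated concurrently; a failure may then be artificial,
		 * so retry against the new nodemask.
		 */
	} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

Note that the retry only fires when the seqcount moved and the operation actually failed, so the common case pays a couple of seqcount reads rather than the full smp_mb() pair of the old mems_allowed_change_disable scheme; on the write side, write_seqcount_begin()/write_seqcount_end() run under task_lock(), which already serializes updaters.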
