Diffstat (limited to 'include/linux/mempolicy.h')
-rw-r--r--	include/linux/mempolicy.h	95
1 file changed, 64 insertions, 31 deletions
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8b67cf837ca..d6a53ed6ab6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -22,6 +22,9 @@
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
+#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
 
 #ifdef __KERNEL__
 
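The MPOL_MF_* flags added above extend the flags argument of mbind(2): MPOL_MF_MOVE asks the kernel to migrate pages owned by the caller so they conform to the requested policy, while MPOL_MF_MOVE_ALL moves every page in the range. The sketch below is a hypothetical userspace caller, not part of this patch; it assumes libnuma's <numaif.h> wrapper for mbind() and a kernel new enough to accept MPOL_MF_MOVE.

/*
 * Hypothetical example: bind an anonymous mapping to node 0 and ask the
 * kernel to move any pages already allocated elsewhere (MPOL_MF_MOVE).
 * Build with -lnuma.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4 << 20;			/* 4 MB anonymous mapping */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
		perror("mbind");	/* EINVAL on kernels without MPOL_MF_MOVE */
	return 0;
}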
@@ -65,6 +68,7 @@ struct mempolicy {
 		nodemask_t	 nodes;		/* interleave */
 		/* undefined for default */
 	} v;
+	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
 };
 
 /*
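The new cpuset_mems_allowed field remembers the cpuset node set the policy was created against; the mpol_rebind_*() declarations later in this diff use it to translate a policy's nodes when the cpuset's memory placement changes. The following is only a rough userspace illustration of that remapping idea, assuming a "same relative position" mapping and using plain 64-bit masks in place of nodemask_t; it is not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Toy rebind: each node the policy references is mapped to the node that
 * occupies the same relative position in the new allowed set.
 */
static uint64_t remap_nodes(uint64_t policy, uint64_t old_allowed,
			    uint64_t new_allowed)
{
	uint64_t out = 0;
	int pos = 0;				/* position within old_allowed */

	for (int node = 0; node < 64; node++) {
		if (!(old_allowed & (1ULL << node)))
			continue;
		if (policy & (1ULL << node)) {
			int seen = 0;		/* find the pos'th set bit of new_allowed */
			for (int n = 0; n < 64; n++)
				if (((new_allowed >> n) & 1) && seen++ == pos) {
					out |= 1ULL << n;
					break;
				}
		}
		pos++;
	}
	return out;
}

int main(void)
{
	/* Policy interleaves over {1,2}; cpuset moves from nodes {0-3} to {4-7}. */
	uint64_t rebound = remap_nodes(0x6, 0x0f, 0xf0);
	printf("rebound policy mask: %#llx\n",	/* prints 0x60, i.e. nodes {5,6} */
	       (unsigned long long)rebound);
	return 0;
}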
@@ -110,14 +114,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
 /*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
-/*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
  * carry the policy. As a special twist the pseudo mm is indexed in pages, not
@@ -136,12 +132,8 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
-{
-	info->root = RB_ROOT;
-	spin_lock_init(&info->lock);
-}
-
+void mpol_shared_policy_init(struct shared_policy *info, int policy,
+				nodemask_t *nodes);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
 				struct mempolicy *new);
@@ -149,13 +141,37 @@ void mpol_free_shared_policy(struct shared_policy *p);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 					    unsigned long idx);
 
-struct mempolicy *get_vma_policy(struct task_struct *task,
-			struct vm_area_struct *vma, unsigned long addr);
-
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
+extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
+
+#ifdef CONFIG_CPUSET
+#define current_cpuset_is_being_rebound() \
+				(cpuset_being_rebound == current->cpuset)
+#else
+#define current_cpuset_is_being_rebound() 0
+#endif
+
 extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+	if (k > policy_zone)
+		policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */
 
 #else
 
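do_migrate_pages() declared above is the in-kernel helper for moving a task's pages from one node set to another. Userspace reaches comparable functionality through the migrate_pages(2) syscall on kernels that provide it; the sketch below is hypothetical and not part of this patch, and assumes libnuma's <numaif.h> wrapper.

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long from = 1UL << 0;		/* move pages off node 0 ... */
	unsigned long to   = 1UL << 1;		/* ... onto node 1 */

	/* pid 0 means the calling process; the call returns the number of
	 * pages that could not be moved, or -1 on error. */
	long left = migrate_pages(0, 8 * sizeof(unsigned long), &from, &to);
	if (left < 0)
		perror("migrate_pages");
	else
		printf("%ld pages could not be moved\n", left);
	return 0;
}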
@@ -182,17 +198,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -202,7 +207,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 	return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
+static inline void mpol_shared_policy_init(struct shared_policy *info,
+					int policy, nodemask_t *nodes)
 {
 }
 
@@ -227,11 +233,38 @@ static inline void numa_default_policy(void)
 {
 }
 
-static inline void numa_policy_rebind(const nodemask_t *old,
+static inline void mpol_rebind_policy(struct mempolicy *pol,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_task(struct task_struct *tsk,
 					const nodemask_t *new)
 {
 }
 
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+#define set_cpuset_being_rebound(x) do {} while (0)
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr)
+{
+	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm,
+			const nodemask_t *from_nodes,
+			const nodemask_t *to_nodes, int flags)
+{
+	return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
 