Diffstat (limited to 'include/linux/mempolicy.h')
-rw-r--r--  include/linux/mempolicy.h | 96
1 file changed, 65 insertions(+), 31 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8b67cf837ca9..bbd2221923c3 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -22,6 +22,9 @@
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
+#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
 
 #ifdef __KERNEL__
 
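
The three new flags extend mbind() so it can act on pages that already exist in the mapping instead of only steering future allocations. A minimal userspace sketch of such a call, assuming libnuma's <numaif.h> wrapper is available (build with -lnuma); the region size and node number are placeholders:

/* Bind an anonymous mapping to node 0 and ask the kernel to move any
 * pages already faulted in (MPOL_MF_MOVE), reporting an error if the
 * existing pages cannot be verified (MPOL_MF_STRICT). */
#include <numaif.h>             /* mbind(), MPOL_* (libnuma) */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = 16 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0, len);                      /* fault the pages in first */

	unsigned long nodemask = 1UL << 0;      /* node 0 only */
	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
		perror("mbind");                /* e.g. kernel lacks the flags */

	munmap(p, len);
	return 0;
}

MPOL_MF_MOVE only migrates pages used solely by the calling process; MPOL_MF_MOVE_ALL also moves shared pages and requires CAP_SYS_NICE.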
@@ -65,6 +68,7 @@ struct mempolicy {
 		nodemask_t	 nodes;		/* interleave */
 		/* undefined for default */
 	} v;
+	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
 };
 
 /*
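
The new cpuset_mems_allowed field records which nodes the policy's node numbers were expressed against, so that when a cpuset's allowed nodes later change the policy can be remapped onto the new set instead of referencing nodes the task may no longer use. The following is only a conceptual, self-contained sketch of that remapping idea using plain bitmasks; it is not the kernel's implementation, and the masks are made-up examples:

/* Conceptual sketch: the n-th allowed node in the old mask maps to the
 * n-th allowed node in the new mask (wrapping if the new mask is smaller). */
#include <stdio.h>

#define MAX_NODES 8

static unsigned remap_mask(unsigned policy, unsigned old_allowed,
			   unsigned new_allowed)
{
	unsigned out = 0;
	int ord = 0;                    /* ordinal position within old_allowed */
	int new_count = __builtin_popcount(new_allowed);

	for (int node = 0; node < MAX_NODES; node++) {
		if (!(old_allowed & (1u << node)))
			continue;
		if (policy & (1u << node)) {
			int want = new_count ? ord % new_count : 0;
			int seen = 0;
			for (int n = 0; n < MAX_NODES; n++) {
				if (!(new_allowed & (1u << n)))
					continue;
				if (seen++ == want) {
					out |= 1u << n;
					break;
				}
			}
		}
		ord++;
	}
	return out;
}

int main(void)
{
	/* Policy interleaves over nodes {1,2}, expressed while the cpuset
	 * allowed nodes {0..3}; the cpuset is then moved to nodes {4..7}. */
	printf("remapped mask: 0x%x\n", remap_mask(0x6, 0x0f, 0xf0)); /* 0x60 */
	return 0;
}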
@@ -110,14 +114,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
 /*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
-/*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
  * carry the policy. As a special twist the pseudo mm is indexed in pages, not
@@ -136,12 +132,8 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
-{
-	info->root = RB_ROOT;
-	spin_lock_init(&info->lock);
-}
-
+void mpol_shared_policy_init(struct shared_policy *info, int policy,
+				nodemask_t *nodes);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
 				struct mempolicy *new);
@@ -149,13 +141,38 @@ void mpol_free_shared_policy(struct shared_policy *p);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 						unsigned long idx);
 
-struct mempolicy *get_vma_policy(struct task_struct *task,
-			struct vm_area_struct *vma, unsigned long addr);
-
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
+extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
+
+#ifdef CONFIG_CPUSET
+#define current_cpuset_is_being_rebound() \
+				(cpuset_being_rebound == current->cpuset)
+#else
+#define current_cpuset_is_being_rebound() 0
+#endif
+
 extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr);
+extern unsigned slab_node(struct mempolicy *policy);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+	if (k > policy_zone)
+		policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */
 
 #else
 
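
do_migrate_pages() is the kernel-side worker that moves a task's pages from one node set to another (used when a cpuset's memory placement changes, among other callers). The closest userspace analogue is the migrate_pages() call; a hedged sketch assuming libnuma's <numaif.h>, with placeholder node numbers:

/* Illustrative: migrate every page of the calling process (pid 0)
 * from node 0 to node 1.  Requires a NUMA-capable kernel. */
#include <numaif.h>             /* migrate_pages() (libnuma) */
#include <stdio.h>

int main(void)
{
	unsigned long from = 1UL << 0;          /* old nodes: {0} */
	unsigned long to   = 1UL << 1;          /* new nodes: {1} */

	if (migrate_pages(0, sizeof(from) * 8, &from, &to) < 0)
		perror("migrate_pages");

	return 0;
}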
@@ -182,17 +199,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -202,7 +208,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 	return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
+static inline void mpol_shared_policy_init(struct shared_policy *info,
+					int policy, nodemask_t *nodes)
 {
 }
 
@@ -227,11 +234,38 @@ static inline void numa_default_policy(void)
 {
 }
 
-static inline void numa_policy_rebind(const nodemask_t *old,
+static inline void mpol_rebind_policy(struct mempolicy *pol,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_task(struct task_struct *tsk,
 					const nodemask_t *new)
 {
 }
 
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+#define set_cpuset_being_rebound(x) do {} while (0)
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr)
+{
+	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm,
+			const nodemask_t *from_nodes,
+			const nodemask_t *to_nodes, int flags)
+{
+	return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
 
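
The rebind hooks above exist so that policies installed with set_mempolicy()/mbind() stay meaningful when a task's allowed nodes change; with !CONFIG_NUMA they all collapse to no-ops. For reference, a minimal sketch of installing a per-task interleave policy from userspace, again assuming libnuma's <numaif.h> and illustrative node numbers:

/* Interleave this task's future page allocations across nodes 0 and 1.
 * If the task's cpuset is later rebound to different nodes, the
 * kernel-side mpol_rebind_task() declared above remaps the policy. */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8) != 0) {
		perror("set_mempolicy");
		return 1;
	}

	/* Pages faulted in after this point are spread round-robin
	 * across the interleave nodes. */
	char *buf = malloc(1 << 20);
	if (buf) {
		buf[0] = 1;
		free(buf);
	}
	return 0;
}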