Diffstat (limited to 'include/linux/mempolicy.h')
-rw-r--r--	include/linux/mempolicy.h	156
1 files changed, 118 insertions, 38 deletions
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 59c4865bc85f..3a39570b81b8 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,15 +8,32 @@
  * Copyright 2003,2004 Andi Kleen SuSE Labs
  */
 
+/*
+ * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
+ * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
+ * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
+ */
+
 /* Policies */
-#define MPOL_DEFAULT	0
-#define MPOL_PREFERRED	1
-#define MPOL_BIND	2
-#define MPOL_INTERLEAVE	3
+enum {
+	MPOL_DEFAULT,
+	MPOL_PREFERRED,
+	MPOL_BIND,
+	MPOL_INTERLEAVE,
+	MPOL_MAX,	/* always last member of enum */
+};
 
-#define MPOL_MAX MPOL_INTERLEAVE
+/* Flags for set_mempolicy */
+#define MPOL_F_STATIC_NODES	(1 << 15)
+#define MPOL_F_RELATIVE_NODES	(1 << 14)
 
-/* Flags for get_mem_policy */
+/*
+ * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
+ * either set_mempolicy() or mbind().
+ */
+#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+
+/* Flags for get_mempolicy */
 #define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
 #define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
 #define MPOL_F_MEMS_ALLOWED (1<<2)	/* return allowed memories */
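[Editor's note] The comment added in this hunk spells out that a mode and its optional MPOL_F_* mode flags travel together in the single 'int' argument of set_mempolicy()/mbind(). A minimal userspace sketch of that calling convention follows; it assumes a <numaif.h> (libnuma) that exposes MPOL_INTERLEAVE and MPOL_F_STATIC_NODES, otherwise the constants and the raw syscall have to be supplied by hand.

    /* Hedged sketch: OR the optional mode flag into the mode argument. */
    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long nodemask = 0x3;   /* nodes 0 and 1 */

            /* mode and MPOL_F_* mode flag share one 'int' actual */
            if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
                              &nodemask, 8 * sizeof(nodemask)))
                    perror("set_mempolicy");
            return 0;
    }

MPOL_F_STATIC_NODES asks that the nodemask not be remapped when the task's cpuset changes, while MPOL_F_RELATIVE_NODES interprets it relative to the set of allowed nodes.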
@@ -27,6 +44,14 @@
 #define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
 #define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
 
+/*
+ * Internal flags that share the struct mempolicy flags word with
+ * "mode flags".  These flags are allocated from bit 0 up, as they
+ * are never OR'ed into the mode in mempolicy API arguments.
+ */
+#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
+#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
+
 #ifdef __KERNEL__
 
 #include <linux/mmzone.h>
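[Editor's note] Because the internal flags above and the user-visible mode flags end up in the same 'flags' word of struct mempolicy (declared later in this patch), tests for both look the same. A small illustrative sketch; the helper names are hypothetical and not part of the patch.

    /* Illustrative helpers; names are not from this patch. */
    static inline int mpol_example_is_shared(struct mempolicy *pol)
    {
            /* internal flags are allocated from bit 0 up */
            return pol->flags & MPOL_F_SHARED;
    }

    static inline int mpol_example_has_user_flags(struct mempolicy *pol)
    {
            /* user-supplied mode flags sit in the MPOL_MODE_FLAGS bits */
            return pol->flags & MPOL_MODE_FLAGS;
    }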
@@ -35,7 +60,6 @@
 #include <linux/spinlock.h>
 #include <linux/nodemask.h>
 
-struct vm_area_struct;
 struct mm_struct;
 
 #ifdef CONFIG_NUMA
@@ -54,22 +78,27 @@ struct mm_struct;
  * mmap_sem.
  *
  * Freeing policy:
- * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
- * All other policies don't have any external state. mpol_free() handles this.
+ * Mempolicy objects are reference counted.  A mempolicy will be freed when
+ * mpol_put() decrements the reference count to zero.
  *
- * Copying policy objects:
- * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
+ * to the new storage.  The reference count of the new object is initialized
+ * to 1, representing the caller of mpol_dup().
  */
 struct mempolicy {
 	atomic_t refcnt;
-	short policy;	/* See MPOL_* above */
+	unsigned short mode;	/* See MPOL_* above */
+	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
 	union {
-		struct zonelist  *zonelist;	/* bind */
 		short		 preferred_node; /* preferred */
-		nodemask_t	 nodes;		/* interleave */
+		nodemask_t	 nodes;		/* interleave/bind */
 		/* undefined for default */
 	} v;
-	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
+	union {
+		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
+		nodemask_t user_nodemask;	/* nodemask passed by user */
+	} w;
 };
 
 /*
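[Editor's note] The reworked comment describes a plain reference-counting contract: mpol_dup() returns a copy whose refcnt already accounts for the caller, and mpol_put() drops that reference. A hedged sketch of a caller honoring that contract, assuming __mpol_dup() reports allocation failure via ERR_PTR() as mm/mempolicy.c does in this series; the function name is illustrative.

    static int example_copy_policy(struct mempolicy *src)
    {
            struct mempolicy *new = mpol_dup(src);

            if (IS_ERR(new))
                    return PTR_ERR(new);

            /* ... inspect new->mode, new->flags, new->v.nodes ... */

            mpol_put(new);          /* drop the reference mpol_dup() gave us */
            return 0;
    }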
@@ -77,18 +106,43 @@ struct mempolicy {
  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
  */
 
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
 {
 	if (pol)
-		__mpol_free(pol);
+		__mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+	return (pol && (pol->flags & MPOL_F_SHARED));
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+	if (mpol_needs_cond_ref(pol))
+		__mpol_put(pol);
+}
+
+extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+					struct mempolicy *frompol);
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+						struct mempolicy *frompol)
+{
+	if (!frompol)
+		return frompol;
+	return __mpol_cond_copy(tompol, frompol);
+}
+
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
 	if (pol)
-		pol = __mpol_copy(pol);
+		pol = __mpol_dup(pol);
 	return pol;
 }
 
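[Editor's note] mpol_cond_put() exists because some lookups (currently only shared policies, which carry MPOL_F_SHARED) hand back an extra reference, while task and vma policies do not. A caller that cannot tell which case it got simply drops the reference conditionally. A hedged usage sketch; the function name is illustrative.

    static void example_use_policy(struct mempolicy *pol)
    {
            /*
             * pol may come from a shared_policy tree (reference held,
             * MPOL_F_SHARED set) or be the task/vma policy (no extra ref).
             */

            /* ... pick a node or zonelist based on pol ... */

            mpol_cond_put(pol);     /* only unrefs if MPOL_F_SHARED is set */
    }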
@@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 		return 1;
 	return __mpol_equal(a, b);
 }
-#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
-
-/* Could later add inheritance of the process policy here. */
-
-#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
 /*
  * Tree of shared policies for a shared memory region.
@@ -133,8 +182,7 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
-				nodemask_t *nodes);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
 				struct mempolicy *new);
@@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk,
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
-extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
+				unsigned long addr, gfp_t gfp_flags,
+				struct mempolicy **mpol, nodemask_t **nodemask);
 extern unsigned slab_node(struct mempolicy *policy);
 
 extern enum zone_type policy_zone;
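[Editor's note] huge_zonelist() now returns the policy's nodemask through an extra out-parameter instead of leaving callers to infer it from the zonelist alone. A hedged sketch of how a hugepage allocation path might consume the new signature; the function name, GFP mask, and allocation loop are illustrative only.

    static struct page *example_dequeue_huge_page(struct vm_area_struct *vma,
                                                  unsigned long addr)
    {
            struct mempolicy *mpol;
            nodemask_t *nodemask;
            struct zonelist *zl;

            zl = huge_zonelist(vma, addr, GFP_HIGHUSER, &mpol, &nodemask);

            /* ... walk zl, skipping zones whose node is not in *nodemask ... */

            return NULL;            /* placeholder: no page found */
    }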
@@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k)
 int do_migrate_pages(struct mm_struct *mm,
 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
 
+
+#ifdef CONFIG_TMPFS
+extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+			int no_context);
+#endif
 #else
 
 struct mempolicy {};
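[Editor's note] mpol_parse_str() and mpol_to_str() let tmpfs translate between a mount-option style policy string and a struct mempolicy, and back again for /proc/mounts. A hedged round-trip sketch; the example string, buffer size, and the value passed for no_context are illustrative, and the exact grammar is the one documented for the tmpfs mpol= option.

    static void example_shmem_mpol_roundtrip(void)
    {
            struct mempolicy *mpol = NULL;
            char buf[64];
            char opt[] = "interleave:0-3";       /* illustrative option string */

            if (mpol_parse_str(opt, &mpol, 1))   /* nonzero return means error */
                    return;

            if (mpol_to_str(buf, sizeof(buf), mpol, 1) >= 0)
                    printk(KERN_DEBUG "mpol: %s\n", buf);

            mpol_put(mpol);                      /* parsing gave us a reference */
    }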
@@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	return 1;
 }
-#define vma_mpol_equal(a,b) 1
 
-#define mpol_set_vma_default(vma) do {} while(0)
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
 
-static inline void mpol_free(struct mempolicy *p)
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+						struct mempolicy *from)
 {
+	return from;
 }
 
 static inline void mpol_get(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
 {
 	return NULL;
 }
@@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 	return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info,
-					int policy, nodemask_t *nodes)
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+						struct mempolicy *mpol)
 {
 }
 
@@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
 }
 
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
+				unsigned long addr, gfp_t gfp_flags,
+				struct mempolicy **mpol, nodemask_t **nodemask)
 {
-	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+	*mpol = NULL;
+	*nodemask = NULL;
+	return node_zonelist(0, gfp_flags);
 }
 
 static inline int do_migrate_pages(struct mm_struct *mm,
@@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm,
 static inline void check_highest_zone(int k)
 {
 }
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
+				int no_context)
+{
+	return 1;	/* error */
+}
+
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+				int no_context)
+{
+	return 0;
+}
+#endif
+
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
 