Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--   mm/oom_kill.c   103
1 file changed, 78 insertions, 25 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 949eba1d5ba3..8123fad5a485 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -133,6 +133,36 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 }
 
 /*
+ * Types of limitations to the nodes from which allocations may occur
+ */
+#define CONSTRAINT_NONE 1
+#define CONSTRAINT_MEMORY_POLICY 2
+#define CONSTRAINT_CPUSET 3
+
+/*
+ * Determine the type of allocation constraint.
+ */
+static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
+{
+#ifdef CONFIG_NUMA
+        struct zone **z;
+        nodemask_t nodes = node_online_map;
+
+        for (z = zonelist->zones; *z; z++)
+                if (cpuset_zone_allowed(*z, gfp_mask))
+                        node_clear((*z)->zone_pgdat->node_id,
+                                        nodes);
+                else
+                        return CONSTRAINT_CPUSET;
+
+        if (!nodes_empty(nodes))
+                return CONSTRAINT_MEMORY_POLICY;
+#endif
+
+        return CONSTRAINT_NONE;
+}
+
+/*
  * Simple selection loop. We chose the process with the highest
  * number of 'points'. We expect the caller will lock the tasklist.
  *
@@ -184,7 +214,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
  * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
  * we select a process with CAP_SYS_RAW_IO set).
  */
-static void __oom_kill_task(task_t *p)
+static void __oom_kill_task(task_t *p, const char *message)
 {
         if (p->pid == 1) {
                 WARN_ON(1);
@@ -200,8 +230,8 @@ static void __oom_kill_task(task_t *p)
                 return;
         }
         task_unlock(p);
-        printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n",
-                                                        p->pid, p->comm);
+        printk(KERN_ERR "%s: Killed process %d (%s).\n",
+                                                message, p->pid, p->comm);
 
         /*
          * We give our sacrificial lamb high priority and access to
@@ -214,7 +244,7 @@ static void __oom_kill_task(task_t *p)
         force_sig(SIGKILL, p);
 }
 
-static struct mm_struct *oom_kill_task(task_t *p)
+static struct mm_struct *oom_kill_task(task_t *p, const char *message)
 {
         struct mm_struct *mm = get_task_mm(p);
         task_t * g, * q;
@@ -226,21 +256,21 @@ static struct mm_struct *oom_kill_task(task_t *p)
                 return NULL;
         }
 
-        __oom_kill_task(p);
+        __oom_kill_task(p, message);
         /*
          * kill all processes that share the ->mm (i.e. all threads),
          * but are in a different thread group
          */
         do_each_thread(g, q)
                 if (q->mm == mm && q->tgid != p->tgid)
-                        __oom_kill_task(q);
+                        __oom_kill_task(q, message);
         while_each_thread(g, q);
 
         return mm;
 }
 
 static struct mm_struct *oom_kill_process(struct task_struct *p,
-                                                unsigned long points)
+                                unsigned long points, const char *message)
 {
         struct mm_struct *mm;
         struct task_struct *c;
@@ -253,11 +283,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
                 c = list_entry(tsk, struct task_struct, sibling);
                 if (c->mm == p->mm)
                         continue;
-                mm = oom_kill_task(c);
+                mm = oom_kill_task(c, message);
                 if (mm)
                         return mm;
         }
-        return oom_kill_task(p);
+        return oom_kill_task(p, message);
 }
 
 /**
@@ -268,10 +298,10 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-void out_of_memory(gfp_t gfp_mask, int order)
+void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
         struct mm_struct *mm = NULL;
-        task_t * p;
+        task_t *p;
         unsigned long points;
 
         if (printk_ratelimit()) {
@@ -283,25 +313,48 @@ void out_of_memory(gfp_t gfp_mask, int order)
 
         cpuset_lock();
         read_lock(&tasklist_lock);
+
+        /*
+         * Check if there were limitations on the allocation (only relevant for
+         * NUMA) that may require different handling.
+         */
+        switch (constrained_alloc(zonelist, gfp_mask)) {
+        case CONSTRAINT_MEMORY_POLICY:
+                mm = oom_kill_process(current, points,
+                                "No available memory (MPOL_BIND)");
+                break;
+
+        case CONSTRAINT_CPUSET:
+                mm = oom_kill_process(current, points,
+                                "No available memory in cpuset");
+                break;
+
+        case CONSTRAINT_NONE:
 retry:
-        p = select_bad_process(&points);
+                /*
+                 * Rambo mode: Shoot down a process and hope it solves whatever
+                 * issues we may have.
+                 */
+                p = select_bad_process(&points);
 
-        if (PTR_ERR(p) == -1UL)
-                goto out;
+                if (PTR_ERR(p) == -1UL)
+                        goto out;
 
-        /* Found nothing?!?! Either we hang forever, or we panic. */
-        if (!p) {
-                read_unlock(&tasklist_lock);
-                cpuset_unlock();
-                panic("Out of memory and no killable processes...\n");
-        }
+                /* Found nothing?!?! Either we hang forever, or we panic. */
+                if (!p) {
+                        read_unlock(&tasklist_lock);
+                        cpuset_unlock();
+                        panic("Out of memory and no killable processes...\n");
+                }
 
-        mm = oom_kill_process(p, points);
-        if (!mm)
-                goto retry;
+                mm = oom_kill_process(p, points, "Out of memory");
+                if (!mm)
+                        goto retry;
+
+                break;
+        }
 
 out:
-        read_unlock(&tasklist_lock);
         cpuset_unlock();
         if (mm)
                 mmput(mm);
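To make the new control flow easier to follow, here is a self-contained userspace C sketch (not part of the commit) that mimics the classification done by constrained_alloc() and the message selection done by the new switch in out_of_memory(). Everything in it (fake_zone, classify(), and the plain arrays standing in for struct zonelist, node_online_map and cpuset_zone_allowed()) is invented for the example.

/*
 * Standalone userspace sketch -- NOT kernel code.  fake_zone, classify()
 * and the arrays below are invented stand-ins for struct zonelist,
 * node_online_map and cpuset_zone_allowed().
 */
#include <stdbool.h>
#include <stdio.h>

#define CONSTRAINT_NONE          1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET        3

struct fake_zone {
        int node_id;            /* node this zone lives on */
        bool cpuset_allows;     /* stand-in for cpuset_zone_allowed() */
};

static int classify(const struct fake_zone *zonelist, int nr_zones,
                    const bool *node_online, int nr_nodes)
{
        bool uncovered[nr_nodes];
        int i;

        /* Start from a copy of the online-node map, as the patch does. */
        for (i = 0; i < nr_nodes; i++)
                uncovered[i] = node_online[i];

        for (i = 0; i < nr_zones; i++) {
                /* Any zone the cpuset forbids means a cpuset constraint. */
                if (!zonelist[i].cpuset_allows)
                        return CONSTRAINT_CPUSET;
                /* This node is reachable through the zonelist; clear it. */
                uncovered[zonelist[i].node_id] = false;
        }

        /*
         * Online nodes the zonelist never touched mean a memory policy
         * (e.g. MPOL_BIND) narrowed the allocation.
         */
        for (i = 0; i < nr_nodes; i++)
                if (uncovered[i])
                        return CONSTRAINT_MEMORY_POLICY;

        return CONSTRAINT_NONE;
}

int main(void)
{
        bool online[2] = { true, true };
        /* A zonelist that only covers node 0 on a two-node machine. */
        struct fake_zone zl[] = { { .node_id = 0, .cpuset_allows = true } };
        const char *msg;

        switch (classify(zl, 1, online, 2)) {
        case CONSTRAINT_MEMORY_POLICY:
                msg = "No available memory (MPOL_BIND)";
                break;
        case CONSTRAINT_CPUSET:
                msg = "No available memory in cpuset";
                break;
        default:
                msg = "Out of memory";
                break;
        }
        printf("%s\n", msg);
        return 0;
}

Compiled and run, this prints "No available memory (MPOL_BIND)": node 1 is online but unreachable through the zonelist, which is the situation the patch now handles by killing current rather than scanning for a system-wide victim.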