Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c    1
-rw-r--r--  mm/oom_kill.c    71
-rw-r--r--  mm/page_alloc.c  12
-rw-r--r--  mm/shmem.c        3
-rw-r--r--  mm/slab.c         2
-rw-r--r--  mm/slob.c        10
-rw-r--r--  mm/vmscan.c       2
7 files changed, 56 insertions, 45 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972d..8778f58880c4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 		md->mapcount_max = count;
 
 	md->node[page_to_nid(page)]++;
-	cond_resched();
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 78747afad6b0..042e6436c3ee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -46,15 +46,25 @@
 unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
 	unsigned long points, cpu_time, run_time, s;
-	struct list_head *tsk;
+	struct mm_struct *mm;
+	struct task_struct *child;
 
-	if (!p->mm)
+	task_lock(p);
+	mm = p->mm;
+	if (!mm) {
+		task_unlock(p);
 		return 0;
+	}
 
 	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
-	points = p->mm->total_vm;
+	points = mm->total_vm;
+
+	/*
+	 * After this unlock we can no longer dereference local variable `mm'
+	 */
+	task_unlock(p);
 
 	/*
 	 * Processes which fork a lot of child processes are likely
@@ -64,11 +74,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * child is eating the vast majority of memory, adding only half
 	 * to the parents will make the child our kill candidate of choice.
 	 */
-	list_for_each(tsk, &p->children) {
-		struct task_struct *chld;
-		chld = list_entry(tsk, struct task_struct, sibling);
-		if (chld->mm != p->mm && chld->mm)
-			points += chld->mm->total_vm/2 + 1;
+	list_for_each_entry(child, &p->children, sibling) {
+		task_lock(child);
+		if (child->mm != mm && child->mm)
+			points += child->mm->total_vm/2 + 1;
+		task_unlock(child);
 	}
 
 	/*
@@ -244,17 +254,24 @@ static void __oom_kill_task(task_t *p, const char *message)
 	force_sig(SIGKILL, p);
 }
 
-static struct mm_struct *oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(task_t *p, const char *message)
 {
-	struct mm_struct *mm = get_task_mm(p);
+	struct mm_struct *mm;
 	task_t * g, * q;
 
-	if (!mm)
-		return NULL;
-	if (mm == &init_mm) {
-		mmput(mm);
-		return NULL;
-	}
+	mm = p->mm;
+
+	/* WARNING: mm may not be dereferenced since we did not obtain its
+	 * value from get_task_mm(p). This is OK since all we need to do is
+	 * compare mm to q->mm below.
+	 *
+	 * Furthermore, even if mm contains a non-NULL value, p->mm may
+	 * change to NULL at any time since we do not hold task_lock(p).
+	 * However, this is of no concern to us.
+	 */
+
+	if (mm == NULL || mm == &init_mm)
+		return 1;
 
 	__oom_kill_task(p, message);
 	/*
@@ -266,13 +283,12 @@ static struct mm_struct *oom_kill_task(task_t *p, const char *message)
 			__oom_kill_task(q, message);
 	while_each_thread(g, q);
 
-	return mm;
+	return 0;
 }
 
-static struct mm_struct *oom_kill_process(struct task_struct *p,
-				unsigned long points, const char *message)
+static int oom_kill_process(struct task_struct *p, unsigned long points,
+						const char *message)
 {
-	struct mm_struct *mm;
 	struct task_struct *c;
 	struct list_head *tsk;
 
@@ -283,9 +299,8 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		mm = oom_kill_task(c, message);
-		if (mm)
-			return mm;
+		if (!oom_kill_task(c, message))
+			return 0;
 	}
 	return oom_kill_task(p, message);
 }
@@ -300,7 +315,6 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	struct mm_struct *mm = NULL;
 	task_t *p;
 	unsigned long points = 0;
 
@@ -320,12 +334,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 	 */
 	switch (constrained_alloc(zonelist, gfp_mask)) {
 	case CONSTRAINT_MEMORY_POLICY:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory (MPOL_BIND)");
 		break;
 
 	case CONSTRAINT_CPUSET:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory in cpuset");
 		break;
 
@@ -347,8 +361,7 @@ retry:
 			panic("Out of memory and no killable processes...\n");
 		}
 
-		mm = oom_kill_process(p, points, "Out of memory");
-		if (!mm)
+		if (oom_kill_process(p, points, "Out of memory"))
 			goto retry;
 
 		break;
@@ -357,8 +370,6 @@ retry:
 out:
 	read_unlock(&tasklist_lock);
 	cpuset_unlock();
-	if (mm)
-		mmput(mm);
 
 	/*
 	 * Give "p" a good chance of killing itself before we
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d66..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,11 +232,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
 	return page_private(page);
 }
 
-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
 	set_page_private(page, order);
 	__SetPageBuddy(page);
 }
@@ -299,9 +301,9 @@ static inline int page_is_buddy(struct page *page, int order)
 
 	if (PageBuddy(page) && page_order(page) == order) {
 		BUG_ON(page_count(page) != 0);
-	       return 1;
-       }
-       return 0;
+		return 1;
+	}
+	return 0;
 }
 
 /*
@@ -1960,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c6..4c5e68e4e9ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };
 
 static struct file_operations shmem_file_operations = {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..af5c5237e11a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/mm/slob.c b/mm/slob.c
index 9bcc7e2cabfd..a68255ba4553 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -354,9 +354,7 @@ void *__alloc_percpu(size_t size)
 	if (!pdata)
 		return NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i) {
 		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
@@ -383,11 +381,9 @@ free_percpu(const void *objp)
 	int i;
 	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
-	}
+
 	kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;