Diffstat (limited to 'mm')
-rw-r--r--  mm/oom_kill.c   | 12
-rw-r--r--  mm/page_alloc.c |  7
-rw-r--r--  mm/slab.c       |  4
-rw-r--r--  mm/swapfile.c   |  8
-rw-r--r--  mm/vmscan.c     | 33
5 files changed, 38 insertions(+), 26 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6969cfb33901..b278b8d60eee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -61,12 +61,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	}
 
 	/*
-	 * swapoff can easily use up all memory, so kill those first.
-	 */
-	if (p->flags & PF_SWAPOFF)
-		return ULONG_MAX;
-
-	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
 	points = mm->total_vm;
@@ -77,6 +71,12 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	task_unlock(p);
 
 	/*
+	 * swapoff can easily use up all memory, so kill those first.
+	 */
+	if (p->flags & PF_SWAPOFF)
+		return ULONG_MAX;
+
+	/*
 	 * Processes which fork a lot of child processes are likely
 	 * a good choice. We add half the vmsize of the children if they
 	 * have an own mm. This prevents forking servers to flood the
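The context above shows why this block moved rather than changed: in the old placement the PF_SWAPOFF check sat between task_lock(p) and task_unlock(p), so returning ULONG_MAX for a swapoff task exited badness() with the task lock still held. A minimal sketch of the before/after pattern, with the surrounding badness() code paraphrased from kernels of this era rather than taken from this patch:

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}
	/* old position: a return ULONG_MAX here leaks task_lock(p) */
	points = mm->total_vm;
	task_unlock(p);
	/* new position: the lock is released, the early return is safe */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;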
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c1a116875bc..a49f96b7ea43 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -711,6 +711,9 @@ static void __drain_pages(unsigned int cpu)
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
 
+		if (!populated_zone(zone))
+			continue;
+
 		pset = zone_pcp(zone, cpu);
 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
 			struct per_cpu_pages *pcp;
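The new guard skips zones that own no pages at all, so the drain loop never touches a per-cpu pageset that was never set up for an empty zone. For reference, populated_zone() in kernels of this vintage is just a test on zone->present_pages (include/linux/mmzone.h):

	static inline int populated_zone(struct zone *zone)
	{
		return (!!zone->present_pages);
	}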
@@ -3321,6 +3324,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 			numentries >>= (scale - PAGE_SHIFT);
 		else
 			numentries <<= (PAGE_SHIFT - scale);
+
+		/* Make sure we've got at least a 0-order allocation.. */
+		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+			numentries = PAGE_SIZE / bucketsize;
 	}
 	numentries = roundup_pow_of_two(numentries);
 
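The second hunk clamps the hash table to at least an order-0 (single page) allocation. A worked example with assumed numbers: with PAGE_SIZE = 4096 and bucketsize = 64, a computed numentries of 13 would ask for only 13 * 64 = 832 bytes; the clamp raises it to 4096 / 64 = 64 entries so the table fills the full page it will be allocated anyway, and roundup_pow_of_two() then leaves 64 unchanged.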
diff --git a/mm/slab.c b/mm/slab.c
index 0d4e57431de4..c6100628a6ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3281,7 +3281,7 @@ retry:
 					flags | GFP_THISNODE, nid);
 	}
 
-	if (!obj) {
+	if (!obj && !(flags & __GFP_NO_GROW)) {
 		/*
 		 * This allocation will be performed within the constraints
 		 * of the current cpuset / memory policy requirements.
@@ -3310,7 +3310,7 @@ retry:
 			 */
 			goto retry;
 		} else {
-			kmem_freepages(cache, obj);
+			/* cache_grow already freed obj */
 			obj = NULL;
 		}
 	}
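Two independent fixes are visible in mm/slab.c. First, fallback_alloc() now honours __GFP_NO_GROW: if the caller forbade growing the cache, it must not attempt cache_grow() at all. Second, a double free is removed: when cache_grow() fails it already releases the pages handed to it, so the old kmem_freepages() call freed them a second time. The corrected ownership rule, sketched as a paraphrase of this code path rather than the full function:

	obj = kmem_getpages(cache, flags, -1);
	if (obj) {
		nid = page_to_nid(virt_to_page(obj));
		if (cache_grow(cache, flags, nid, obj))
			goto retry;	/* grown slab now holds free objects */
		/* cache_grow() already freed the pages on failure;
		 * freeing them again here would be a double free */
		obj = NULL;
	}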
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b9fc0e5de6d5..a2d9bb4e80df 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -434,7 +434,7 @@ void free_swap_and_cache(swp_entry_t entry)
  *
  * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device, sector_t offset)
+int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 {
 	struct block_device *bdev = NULL;
 	int i;
@@ -450,6 +450,9 @@ int swap_type_of(dev_t device, sector_t offset)
 			continue;
 
 		if (!bdev) {
+			if (bdev_p)
+				*bdev_p = sis->bdev;
+
 			spin_unlock(&swap_lock);
 			return i;
 		}
@@ -459,6 +462,9 @@ int swap_type_of(dev_t device, sector_t offset)
 			se = list_entry(sis->extent_list.next,
 					struct swap_extent, list);
 			if (se->start_block == offset) {
+				if (bdev_p)
+					*bdev_p = sis->bdev;
+
 				spin_unlock(&swap_lock);
 				bdput(bdev);
 				return i;
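The new out-parameter lets the swsusp callers mentioned in the comment above the function obtain the backing block device together with the swap type, while passing NULL preserves the old behaviour. A hypothetical caller (variable names assumed, and assuming the function's existing convention of returning a negative errno when nothing matches):

	struct block_device *bdev;
	int type;

	type = swap_type_of(device, offset, &bdev);
	if (type < 0)
		return type;	/* no swap area matched */
	/* bdev now refers to the matching swap area's block device */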
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 40fea4918390..7430df68cb64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	struct zone *zone;
+	unsigned long ret = 0;
+
+	for_each_zone(zone)
+		ret += zone->nr_active + zone->nr_inactive;
+	return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
 	 * in slab caches
 	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;
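Taken together, the mm/vmscan.c hunks are one logical cleanup: lru_pages used to be computed in two places and zeroed at the end of every pass, so shrink_slab() could be handed a stale or zero LRU count. The new count_lru_pages() helper recomputes a fresh count at each call site instead. The count matters because shrink_slab() scales slab pressure by the ratio of pages scanned to pages on the LRU, roughly (recalled from this era's shrink_slab(), an assumption rather than part of this patch):

	delta = (4 * scanned / shrinker->seeks) * max_pass / (lru_pages + 1);

so an understated lru_pages overstates the pressure applied to slab caches.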