Diffstat (limited to 'mm')
-rw-r--r--  mm/bounce.c           4
-rw-r--r--  mm/memory.c           2
-rw-r--r--  mm/memory_hotplug.c   6
-rw-r--r--  mm/mempolicy.c        4
-rw-r--r--  mm/oom_kill.c        19
-rw-r--r--  mm/page-writeback.c  45
-rw-r--r--  mm/page_alloc.c      32
-rw-r--r--  mm/rmap.c             2
-rw-r--r--  mm/slab.c             4
-rw-r--r--  mm/slob.c            11
-rw-r--r--  mm/swapfile.c         8
-rw-r--r--  mm/truncate.c        24
-rw-r--r--  mm/vmscan.c          35
13 files changed, 139 insertions, 57 deletions
diff --git a/mm/bounce.c b/mm/bounce.c
index e4b62d2a40..643efbe824 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -237,6 +237,8 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
 	 * at least one page was bounced, fill in possible non-highmem
 	 * pages
@@ -291,8 +293,6 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		pool = isa_page_pool;
 	}
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
-
 	/*
 	 * slow path
 	 */
diff --git a/mm/memory.c b/mm/memory.c
index 563792f4f6..af227d26e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1091,7 +1091,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		if (pages) {
 			pages[i] = page;
 
-			flush_anon_page(page, start);
+			flush_anon_page(vma, page, start);
 			flush_dcache_page(page);
 		}
 		if (vmas)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0c055a090f..84279127fc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	zone_type = zone - pgdat->node_zones;
 	if (!populated_zone(zone)) {
 		int ret = 0;
-		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+		ret = init_currently_empty_zone(zone, phys_start_pfn,
+						nr_pages, MEMMAP_HOTPLUG);
 		if (ret < 0)
 			return ret;
 	}
-	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
+	memmap_init_zone(nr_pages, nid, zone_type,
+			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index da94639465..c2aec0e109 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -884,6 +884,10 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	err = get_nodes(&nodes, nmask, maxnode);
 	if (err)
 		return err;
+#ifdef CONFIG_CPUSETS
+	/* Restrict the nodes to the allowed nodes in the cpuset */
+	nodes_and(nodes, nodes, current->mems_allowed);
+#endif
 	return do_mbind(start, len, mode, &nodes, flags);
 }
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 64cf3c2146..b278b8d60e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -61,12 +61,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	}
 
 	/*
-	 * swapoff can easily use up all memory, so kill those first.
-	 */
-	if (p->flags & PF_SWAPOFF)
-		return ULONG_MAX;
-
-	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
 	points = mm->total_vm;
@@ -77,6 +71,12 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	task_unlock(p);
 
 	/*
+	 * swapoff can easily use up all memory, so kill those first.
+	 */
+	if (p->flags & PF_SWAPOFF)
+		return ULONG_MAX;
+
+	/*
 	 * Processes which fork a lot of child processes are likely
 	 * a good choice. We add half the vmsize of the children if they
 	 * have an own mm. This prevents forking servers to flood the
@@ -174,7 +174,12 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 #ifdef CONFIG_NUMA
 	struct zone **z;
-	nodemask_t nodes = node_online_map;
+	nodemask_t nodes;
+	int node;
+	/* node has memory ? */
+	for_each_online_node(node)
+		if (NODE_DATA(node)->node_present_pages)
+			node_set(node, nodes);
 
 	for (z = zonelist->zones; *z; z++)
 		if (cpuset_zone_allowed_softwall(*z, gfp_mask))
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b3a198c924..1d2fc89ca5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -862,17 +862,46 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (!mapping)
-		return TestClearPageDirty(page);
-
-	if (TestClearPageDirty(page)) {
-		if (mapping_cap_account_dirty(mapping)) {
-			page_mkclean(page);
+	if (mapping && mapping_cap_account_dirty(mapping)) {
+		/*
+		 * Yes, Virginia, this is indeed insane.
+		 *
+		 * We use this sequence to make sure that
+		 *  (a) we account for dirty stats properly
+		 *  (b) we tell the low-level filesystem to
+		 *      mark the whole page dirty if it was
+		 *      dirty in a pagetable. Only to then
+		 *  (c) clean the page again and return 1 to
+		 *      cause the writeback.
+		 *
+		 * This way we avoid all nasty races with the
+		 * dirty bit in multiple places and clearing
+		 * them concurrently from different threads.
+		 *
+		 * Note! Normally the "set_page_dirty(page)"
+		 * has no effect on the actual dirty bit - since
+		 * that will already usually be set. But we
+		 * need the side effects, and it can help us
+		 * avoid races.
+		 *
+		 * We basically use the page "master dirty bit"
+		 * as a serialization point for all the different
+		 * threads doing their things.
+		 *
+		 * FIXME! We still have a race here: if somebody
+		 * adds the page back to the page tables in
+		 * between the "page_mkclean()" and the "TestClearPageDirty()",
+		 * we might have it mapped without the dirty bit set.
+		 */
+		if (page_mkclean(page))
+			set_page_dirty(page);
+		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
+			return 1;
 		}
-		return 1;
-	}
-	return 0;
+		return 0;
+	}
+	return TestClearPageDirty(page);
 }
 EXPORT_SYMBOL(clear_page_dirty_for_io);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c1a116875..fc5b5442e9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -711,6 +711,9 @@ static void __drain_pages(unsigned int cpu)
 	for_each_zone(zone) {
 		struct per_cpu_pageset *pset;
 
+		if (!populated_zone(zone))
+			continue;
+
 		pset = zone_pcp(zone, cpu);
 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
 			struct per_cpu_pages *pcp;
@@ -1953,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn)
+		unsigned long start_pfn, enum memmap_context context)
 {
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
-			continue;
-		if (!early_pfn_in_nid(pfn, nid))
-			continue;
+		/*
+		 * There can be holes in boot-time mem_map[]s
+		 * handed to this function. They do not
+		 * exist on hotplugged memory.
+		 */
+		if (context == MEMMAP_EARLY) {
+			if (!early_pfn_valid(pfn))
+				continue;
+			if (!early_pfn_in_nid(pfn, nid))
+				continue;
+		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
@@ -1990,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn))
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2236,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
-					unsigned long size)
+					unsigned long size,
+					enum memmap_context context)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int ret;
@@ -2680,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn,
+						size, MEMMAP_EARLY);
 		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
@@ -3321,6 +3333,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 			numentries >>= (scale - PAGE_SHIFT);
 		else
 			numentries <<= (PAGE_SHIFT - scale);
+
+		/* Make sure we've got at least a 0-order allocation.. */
+		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+			numentries = PAGE_SIZE / bucketsize;
 	}
 	numentries = roundup_pow_of_two(numentries);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 57306fa011..669acb22b5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -452,7 +452,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		entry = ptep_clear_flush(vma, address, pte);
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
-		set_pte_at(vma, address, pte, entry);
+		set_pte_at(mm, address, pte, entry);
 		lazy_mmu_prot_update(entry);
 		ret = 1;
 	}
diff --git a/mm/slab.c b/mm/slab.c
index 0d4e57431d..c6100628a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3281,7 +3281,7 @@ retry:
 					flags | GFP_THISNODE, nid);
 	}
 
-	if (!obj) {
+	if (!obj && !(flags & __GFP_NO_GROW)) {
 		/*
 		 * This allocation will be performed within the constraints
 		 * of the current cpuset / memory policy requirements.
@@ -3310,7 +3310,7 @@ retry:
 			 */
 			goto retry;
 		} else {
-			kmem_freepages(cache, obj);
+			/* cache_grow already freed obj */
 			obj = NULL;
 		}
 	}
diff --git a/mm/slob.c b/mm/slob.c
index 2e9236e10e..5adc29cb58 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,8 @@ static DEFINE_SPINLOCK(slob_lock);
 static DEFINE_SPINLOCK(block_lock);
 
 static void slob_free(void *b, int size);
+static void slob_timer_cbk(void);
+
 
 static void *slob_alloc(size_t size, gfp_t gfp, int align)
 {
@@ -326,7 +328,7 @@ const char *kmem_cache_name(struct kmem_cache *c)
 EXPORT_SYMBOL(kmem_cache_name);
 
 static struct timer_list slob_timer = TIMER_INITIALIZER(
-	(void (*)(unsigned long))kmem_cache_init, 0, 0);
+	(void (*)(unsigned long))slob_timer_cbk, 0, 0);
 
 int kmem_cache_shrink(struct kmem_cache *d)
 {
@@ -339,7 +341,12 @@ int kmem_ptr_validate(struct kmem_cache *a, const void *b)
 	return 0;
 }
 
-void kmem_cache_init(void)
+void __init kmem_cache_init(void)
+{
+	slob_timer_cbk();
+}
+
+static void slob_timer_cbk(void)
 {
 	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b9fc0e5de6..a2d9bb4e80 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -434,7 +434,7 @@ void free_swap_and_cache(swp_entry_t entry)
  *
  * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device, sector_t offset)
+int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 {
 	struct block_device *bdev = NULL;
 	int i;
@@ -450,6 +450,9 @@ int swap_type_of(dev_t device, sector_t offset)
 			continue;
 
 		if (!bdev) {
+			if (bdev_p)
+				*bdev_p = sis->bdev;
+
 			spin_unlock(&swap_lock);
 			return i;
 		}
@@ -459,6 +462,9 @@ int swap_type_of(dev_t device, sector_t offset)
 			se = list_entry(sis->extent_list.next,
 					struct swap_extent, list);
 			if (se->start_block == offset) {
+				if (bdev_p)
+					*bdev_p = sis->bdev;
+
 				spin_unlock(&swap_lock);
 				bdput(bdev);
 				return i;
diff --git a/mm/truncate.c b/mm/truncate.c
index 4a38dd1a4c..6c79ca4a1c 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -60,12 +60,16 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
 		WARN_ON(++warncount < 5);
 	}
 
-	if (TestClearPageDirty(page) && account_size &&
-			mapping_cap_account_dirty(page->mapping)) {
-		dec_zone_page_state(page, NR_FILE_DIRTY);
-		task_io_account_cancelled_write(account_size);
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && mapping_cap_account_dirty(mapping)) {
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+			if (account_size)
+				task_io_account_cancelled_write(account_size);
+		}
 	}
 }
+EXPORT_SYMBOL(cancel_dirty_page);
 
 /*
  * If truncate cannot remove the fs-private metadata from the page, the page
@@ -337,6 +341,15 @@ failed:
 	return 0;
 }
 
+static int do_launder_page(struct address_space *mapping, struct page *page)
+{
+	if (!PageDirty(page))
+		return 0;
+	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+		return 0;
+	return mapping->a_ops->launder_page(page);
+}
+
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping: the address_space
@@ -401,7 +414,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 						  PAGE_CACHE_SIZE, 0);
 				}
 			}
-			if (!invalidate_complete_page2(mapping, page))
+			ret = do_launder_page(mapping, page);
+			if (ret == 0 && !invalidate_complete_page2(mapping, page))
 				ret = -EIO;
 			unlock_page(page);
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 63eb9ab003..7430df68cb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -692,7 +692,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-		__count_vm_events(PGACTIVATE, nr_freed);
+		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	struct zone *zone;
+	unsigned long ret = 0;
+
+	for_each_zone(zone)
+		ret += zone->nr_active + zone->nr_inactive;
+	return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
 	 * in slab caches
 	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;