Diffstat (limited to 'mm')
-rw-r--r--	mm/mlock.c	7
-rw-r--r--	mm/page-writeback.c	15
-rw-r--r--	mm/page_alloc.c	27
-rw-r--r--	mm/page_io.c	2
-rw-r--r--	mm/shmem.c	43
-rw-r--r--	mm/slab.c	1
-rw-r--r--	mm/slob.c	41
-rw-r--r--	mm/slub.c	83
-rw-r--r--	mm/swapfile.c	4
-rw-r--r--	mm/util.c	20
-rw-r--r--	mm/vmalloc.c	20
-rw-r--r--	mm/vmscan.c	32
12 files changed, 192 insertions(+), 103 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }
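The mm/mlock.c hunks split the old free_locked_buffer() in two: release_locked_buffer() only undoes the locked_vm accounting, while free_locked_buffer() additionally frees the memory. A minimal sketch of the resulting call pattern, with hypothetical caller-side names (buf, len, keep_buffer) that are not part of the patch:

	void *buf = alloc_locked_buffer(len);	/* charges current->mm->locked_vm */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	if (keep_buffer)
		release_locked_buffer(buf, len);	/* drop accounting, keep memory; kfree() later */
	else
		free_locked_buffer(buf, len);	/* drop accounting and kfree(buf) */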
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6106a5c7ed44..74dc57c74349 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
 {
 	prop_inc_single(&vm_dirties, &tsk->dirties);
 }
@@ -1079,7 +1079,7 @@ continue_unlock:
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-	if (!cycled) {
+	if (!cycled && !done) {
 		/*
 		 * range_cyclic:
 		 * We hit the last page and there is more work to be done: wrap
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			__inc_zone_page_state(page, NR_FILE_DIRTY);
 			__inc_bdi_stat(mapping->backing_dev_info,
 					BDI_RECLAIMABLE);
+			task_dirty_inc(current);
 			task_io_account_write(PAGE_CACHE_SIZE);
 		}
 		radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
 	}
 	return 0;
 }
-
-int set_page_dirty(struct page *page)
-{
-	int ret = __set_page_dirty(page);
-	if (ret)
-		task_dirty_inc(current);
-	return ret;
-}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..5c44ed49ca93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 		if (start_pfn <= pfn && pfn < end_pfn)
 			return early_node_map[i].nid;
 	}
+	/* This is a memory hole */
+	return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
 
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0)
+		return nid;
+	/* just returns 0 */
 	return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+	int nid;
+
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0 && nid != node)
+		return false;
+	return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
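For reference, the mm/page_alloc.c change gives memory holes an explicit representation: __early_pfn_to_nid() now returns -1 for a pfn outside every early_node_map[] range, early_pfn_to_nid() keeps its old contract of always returning a node (falling back to 0), and the new early_pfn_in_nid() treats a hole as belonging to the node being initialised. A hedged sketch of that contract (illustrative helper, not kernel code):

	/* Sketch: the test early_pfn_in_nid() now performs during memmap init. */
	static bool pfn_belongs_here(unsigned long pfn, int node)
	{
		int nid = __early_pfn_to_nid(pfn);	/* -1 means memory hole */

		return nid < 0 || nid == node;		/* holes are not skipped */
	}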
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= (1 << BIO_RW_SYNC);
+		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdeea..4103a239ce84 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-	return (flags & VM_ACCOUNT) ?
-		security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+	return (flags & VM_NORESERVE) ?
+		0 : security_vm_enough_memory_kern(VM_ACCT(size));
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 {
-	if (flags & VM_ACCOUNT)
+	if (!(flags & VM_NORESERVE))
 		vm_unacct_memory(VM_ACCT(size));
 }
 
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-	return (flags & VM_ACCOUNT) ?
-		0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+	return (flags & VM_NORESERVE) ?
+		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
-	if (!(flags & VM_ACCOUNT))
+	if (flags & VM_NORESERVE)
 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+			dev_t dev, unsigned long flags)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 		info = SHMEM_I(inode);
 		memset(info, 0, (char *)inode - (char *)info);
 		spin_lock_init(&info->lock);
+		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
 
 		switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int
 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
-	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -ENOSPC;
 
+	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
 	if (inode) {
 		error = security_inode_init_security(inode, dir, NULL, NULL,
 						     NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	if (len > PAGE_CACHE_SIZE)
 		return -ENAMETOOLONG;
 
-	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
 	if (!inode)
 		return -ENOSPC;
 
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_flags |= MS_POSIXACL;
 #endif
 
-	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
 	if (!inode)
 		goto failed;
 	inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	return 0;
 }
 
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
 #define SHMEM_MAX_BYTES LLONG_MAX
 
 #endif /* CONFIG_SHMEM */
 
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
  * shmem_file_setup - get an unlinked file living in tmpfs
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
- * @flags: vm_flags
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
  */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto put_dentry;
 
 	error = -ENOSPC;
-	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
 	if (!inode)
 		goto close_file;
 
-#ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d58..4d00855629c4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4457,3 +4457,4 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index f901653707a4..0bfa680a8981 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
 
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -477,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		void *ret;
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -495,8 +505,8 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
@@ -514,14 +524,15 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
 	} else
 		return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
@@ -573,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = slob_new_page(flags, get_order(c->size), node);
+		b = slob_new_pages(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b);
@@ -587,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb08a23..c65a4edafc33 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
-
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
+	struct track *p = get_track(s, object, alloc);
 
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
 
 	/*
@@ -1928,17 +1926,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
#ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2538,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
@@ -2736,6 +2737,7 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2985,7 +2987,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3022,7 +3024,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3222,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3238,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3835,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
@@ -4150,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
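The repeated PAGE_SIZE -> SLUB_MAX_SIZE and "PAGE_SHIFT + 1" -> SLUB_PAGE_SHIFT substitutions move the cutover between kmalloc caches and the page allocator up to a limit defined in slub_def.h. A hedged sketch of the resulting behaviour, assuming the usual definitions SLUB_MAX_SIZE == 2 * PAGE_SIZE and SLUB_PAGE_SHIFT == PAGE_SHIFT + 2 (check slub_def.h in the tree in question); the sizes below are only examples:

	/* Sketch only: which path a kmalloc() request takes after this change. */
	void *small = kmalloc(PAGE_SIZE + 100, GFP_KERNEL);	/* <= SLUB_MAX_SIZE: served from kmalloc_caches[] */
	void *large = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);	/* > SLUB_MAX_SIZE: falls back to kmalloc_large() */

	kfree(small);
	kfree(large);		/* kfree() handles both cases */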
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7e6304dfafab..312fafe0ab6e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 
 		if (!bdev) {
 			if (bdev_p)
-				*bdev_p = sis->bdev;
+				*bdev_p = bdget(sis->bdev->bd_dev);
 
 			spin_unlock(&swap_lock);
 			return i;
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 					struct swap_extent, list);
 			if (se->start_block == offset) {
 				if (bdev_p)
-					*bdev_p = bdget(sis->bdev->bd_dev);
 
 				spin_unlock(&swap_lock);
 				bdput(bdev);
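With the bdget() conversions, swap_type_of() now hands back a referenced block_device rather than a bare pointer, so the caller must drop that reference when done. A small sketch of the expected calling convention (the dev and offset variables are illustrative, not from the patch):

	struct block_device *bdev;
	int type = swap_type_of(dev, offset, &bdev);

	if (type >= 0) {
		/* ... use bdev while holding the reference taken via bdget() ... */
		bdput(bdev);	/* caller is now responsible for releasing it */
	}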
diff --git a/mm/util.c b/mm/util.c
index cb00b748ce47..37eaccdf3054 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 }
 EXPORT_SYMBOL(krealloc);
 
+/**
+ * kzfree - like kfree but zero memory
+ * @p: object to free memory of
+ *
+ * The memory of the object @p points to is zeroed before freed.
+ * If @p is %NULL, kzfree() does nothing.
+ */
+void kzfree(const void *p)
+{
+	size_t ks;
+	void *mem = (void *)p;
+
+	if (unlikely(ZERO_OR_NULL_PTR(mem)))
+		return;
+	ks = ksize(mem);
+	memset(mem, 0, ks);
+	kfree(mem);
+}
+EXPORT_SYMBOL(kzfree);
+
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
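The new kzfree() clears the whole underlying allocation, as reported by ksize(), before freeing it; that is also why ksize() gains EXPORT_SYMBOL() in slab.c, slob.c and slub.c above. A short usage sketch with a hypothetical key buffer (key and key_len are illustrative, not from the patch):

	u8 *key = kmalloc(key_len, GFP_KERNEL);
	if (!key)
		return -ENOMEM;
	/* ... use key for something sensitive ... */
	kzfree(key);	/* memset(key, 0, ksize(key)) followed by kfree(key) */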
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8c..520a75980269 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -365,6 +369,8 @@ retry:
 
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
@@ -375,6 +381,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
@@ -1012,6 +1020,8 @@ void __init vmalloc_init(void)
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
+
+	flush_cache_vunmap(addr, end);
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
@@ -1106,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end,
+				void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
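The new __get_vm_area_caller() lets a wrapper record its real caller for /proc/vmallocinfo instead of the wrapper's own address. A hedged sketch of how an ioremap-style helper might use it (the flag and range constants are the usual kernel ones, not taken from this diff):

	struct vm_struct *area;

	area = __get_vm_area_caller(size, VM_IOREMAP, VMALLOC_START, VMALLOC_END,
				    __builtin_return_address(0));
	if (!area)
		return NULL;
	/* area->caller now points at the original caller, not the wrapper */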
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..56ddf41149eb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1262,7 +1262,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * Move the pages to the [file or anon] inactive list.
 	 */
 	pagevec_init(&pvec, 1);
-	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
 
 	spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1273,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
+	pgmoved = 0;
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
 		int file = is_file_lru(l);
 		int scan;
 
-		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		scan = zone_nr_pages(zone, sc, l);
 		if (priority) {
 			scan >>= priority;
 			scan = (scan * percent[file]) / 100;
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 				      int pass, struct scan_control *sc)
 {
 	struct zone *zone;
-	unsigned long nr_to_scan, ret = 0;
-	enum lru_list l;
+	unsigned long ret = 0;
 
 	for_each_zone(zone) {
+		enum lru_list l;
 
 		if (!populated_zone(zone))
 			continue;
-
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
 		for_each_evictable_lru(l) {
+			enum zone_stat_item ls = NR_LRU_BASE + l;
+			unsigned long lru_pages = zone_page_state(zone, ls);
+
 			/* For pass = 0, we don't shrink the active list */
-			if (pass == 0 &&
-				(l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+						l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan +=
-				(zone_page_state(zone, NR_LRU_BASE + l)
-								>> prio) + 1;
+			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
 			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				unsigned long nr_to_scan;
+
 				zone->lru[l].nr_scan = 0;
-				nr_to_scan = min(nr_pages,
-					zone_page_state(zone,
-							NR_LRU_BASE + l));
+				nr_to_scan = min(nr_pages, lru_pages);
 				ret += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 			}
 		}
 	}
-
 	return ret;
 }
 
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		.may_swap = 0,
 		.swap_cluster_max = nr_pages,
 		.may_writepage = 1,
-		.swappiness = vm_swappiness,
 		.isolate_pages = isolate_pages_global,
 	};
 
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		int prio;
 
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
-		if (pass > 2) {
+		if (pass > 2)
 			sc.may_swap = 1;
-			sc.swappiness = 100;
-		}
 
 		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 			unsigned long nr_to_scan = nr_pages - ret;