From 29c0177e6a4ac094302bed54a1d4bbb6b740a9ef Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 13 Dec 2008 21:20:25 +1030 Subject: cpumask: change cpumask_scnprintf, cpumask_parse_user, cpulist_parse, and cpulist_scnprintf to take pointers. Impact: change calling convention of existing cpumask APIs Most cpumask functions started with cpus_: these have been replaced by cpumask_ ones which take struct cpumask pointers as expected. These four functions don't have good replacement names; fortunately they're rarely used, so we just change them over. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Ingo Molnar Cc: paulus@samba.org Cc: mingo@redhat.com Cc: tony.luck@intel.com Cc: ralf@linux-mips.org Cc: Greg Kroah-Hartman Cc: cl@linux-foundation.org Cc: srostedt@redhat.com --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index a2cd47d89e0a..8e516e29f989 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3626,7 +3626,7 @@ static int list_locations(struct kmem_cache *s, char *buf, len < PAGE_SIZE - 60) { len += sprintf(buf + len, " cpus="); len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, - l->cpus); + &l->cpus); } if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && -- cgit v1.2.2 From ea319518ba3de282c13ae1cf4bf2215c5e03e67e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Dec 2008 15:08:55 +0100 Subject: locking, percpu counters: introduce separate lock classes Impact: fix lockdep false positives Classify percpu_counter instances similar to regular lock objects -- that is, per instantiation site. The networking code has increased its use of percpu_counters, which leads to false positives if they are treated as a single class. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- mm/backing-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f2e574dbc300..f3b125857827 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -220,7 +220,7 @@ int bdi_init(struct backing_dev_info *bdi) bdi->max_prop_frac = PROP_FRAC_BASE; for (i = 0; i < NR_BDI_STAT_ITEMS; i++) { - err = percpu_counter_init_irq(&bdi->bdi_stat[i], 0); + err = percpu_counter_init(&bdi->bdi_stat[i], 0); if (err) goto err; } -- cgit v1.2.2 From 3e597945384dee1457240158eb81e3afb90b68c2 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 1 Jan 2009 10:12:24 +1030 Subject: cpumask: remove any_online_cpu() users: mm/ Impact: Remove obsolete API usage any_online_cpu() is a good name, but it takes a cpumask_t, not a pointer. There are several places where any_online_cpu() doesn't really want a mask arg at all. Replace all callers with cpumask_any() and cpumask_any_and(). 
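As an illustrative sketch of the conversion pattern (not part of the patch itself; it assumes a pointer-valued mask, as in the vmscan hunk below):

	/* old: the whole cpumask_t is passed by value */
	if (any_online_cpu(*mask) < nr_cpu_ids)
		set_cpus_allowed_ptr(task, mask);

	/* new: intersect with cpu_online_mask, keeping the mask a pointer */
	if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
		set_cpus_allowed_ptr(task, mask);

Both forms yield a value >= nr_cpu_ids when no matching CPU is online, so existing "< nr_cpu_ids" checks carry over unchanged.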
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 62e7f62fb559..240f062f71f1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2141,7 +2141,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, pg_data_t *pgdat = NODE_DATA(nid); node_to_cpumask_ptr(mask, pgdat->node_id); - if (any_online_cpu(*mask) < nr_cpu_ids) + if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ set_cpus_allowed_ptr(pgdat->kswapd, mask); } -- cgit v1.2.2 From 174596a0b9f21e8844d70566a6bb29bf48a87750 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 1 Jan 2009 10:12:29 +1030 Subject: cpumask: convert mm/ Impact: Use new API Convert kernel mm functions to use struct cpumask. We skip include/linux/percpu.h and mm/allocpercpu.c, which are in flux. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Reviewed-by: Christoph Lameter --- mm/pdflush.c | 16 +++++++++++++--- mm/slab.c | 2 +- mm/slub.c | 20 +++++++++++--------- mm/vmscan.c | 2 +- mm/vmstat.c | 4 ++-- 5 files changed, 28 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/pdflush.c b/mm/pdflush.c index a0a14c4d5072..15de509b68fd 100644 --- a/mm/pdflush.c +++ b/mm/pdflush.c @@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work) static int pdflush(void *dummy) { struct pdflush_work my_work; - cpumask_t cpus_allowed; + cpumask_var_t cpus_allowed; + + /* + * Since the caller doesn't even check kthread_run() worked, let's not + * freak out too much if this fails. + */ + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { + printk(KERN_WARNING "pdflush failed to allocate cpumask\n"); + return 0; + } /* * pdflush can spend a lot of time doing encryption via dm-crypt. We @@ -187,8 +196,9 @@ static int pdflush(void *dummy) * This is needed as pdflush's are dynamically created and destroyed. * The boottime pdflush's are easily placed w/o these 2 lines. */ - cpuset_cpus_allowed(current, &cpus_allowed); - set_cpus_allowed_ptr(current, &cpus_allowed); + cpuset_cpus_allowed(current, cpus_allowed); + set_cpus_allowed_ptr(current, cpus_allowed); + free_cpumask_var(cpus_allowed); return __pdflush(&my_work); } diff --git a/mm/slab.c b/mm/slab.c index f97e564bdf11..ddc41f337d58 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2157,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, /* * We use cache_chain_mutex to ensure a consistent view of - * cpu_online_map as well. Please see cpuup_callback + * cpu_online_mask as well. 
Please see cpuup_callback */ get_online_cpus(); mutex_lock(&cache_chain_mutex); diff --git a/mm/slub.c b/mm/slub.c index 0d861c3154b6..f0e2892fe403 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1970,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); -static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; +static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, int cpu, gfp_t flags) @@ -2045,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu) { int i; - if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) + if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once))) return; for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); - cpu_set(cpu, kmem_cach_cpu_free_init_once); + cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)); } static void __init init_alloc_cpu(void) @@ -3451,7 +3451,7 @@ struct location { long max_time; long min_pid; long max_pid; - cpumask_t cpus; + DECLARE_BITMAP(cpus, NR_CPUS); nodemask_t nodes; }; @@ -3526,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, if (track->pid > l->max_pid) l->max_pid = track->pid; - cpu_set(track->cpu, l->cpus); + cpumask_set_cpu(track->cpu, + to_cpumask(l->cpus)); } node_set(page_to_nid(virt_to_page(track)), l->nodes); return 1; @@ -3556,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, l->max_time = age; l->min_pid = track->pid; l->max_pid = track->pid; - cpus_clear(l->cpus); - cpu_set(track->cpu, l->cpus); + cpumask_clear(to_cpumask(l->cpus)); + cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); nodes_clear(l->nodes); node_set(page_to_nid(virt_to_page(track)), l->nodes); return 1; @@ -3638,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf, len += sprintf(buf + len, " pid=%ld", l->min_pid); - if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && + if (num_online_cpus() > 1 && + !cpumask_empty(to_cpumask(l->cpus)) && len < PAGE_SIZE - 60) { len += sprintf(buf + len, " cpus="); len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, - &l->cpus); + to_cpumask(l->cpus)); } if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && diff --git a/mm/vmscan.c b/mm/vmscan.c index 240f062f71f1..d196f46c8808 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1902,7 +1902,7 @@ static int kswapd(void *p) }; node_to_cpumask_ptr(cpumask, pgdat->node_id); - if (!cpus_empty(*cpumask)) + if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); current->reclaim_state = &reclaim_state; diff --git a/mm/vmstat.c b/mm/vmstat.c index c3ccfda23adc..91149746bb8d 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; EXPORT_PER_CPU_SYMBOL(vm_event_states); -static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) +static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask) { int cpu; int i; @@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) void all_vm_events(unsigned long *ret) { get_online_cpus(); - sum_vm_events(ret, &cpu_online_map); + sum_vm_events(ret, cpu_online_mask); put_online_cpus(); } EXPORT_SYMBOL_GPL(all_vm_events); -- cgit v1.2.2 From 2e4e27c7d082b2198b63041310609d7191185a9d Mon Sep 17 00:00:00 2001 From: Adam Lackorzynski Date: Sun, 4 Jan 2009 12:00:46 -0800 Subject: 
vmalloc.c: fix flushing in vmap_page_range() The flush_cache_vmap in vmap_page_range() is called with the end of the range twice. The following patch fixes this for me. Signed-off-by: Adam Lackorzynski Cc: Nick Piggin Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 1ddb77ba3995..7465f22fec0c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -151,11 +151,12 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, * * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] */ -static int vmap_page_range(unsigned long addr, unsigned long end, +static int vmap_page_range(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) { pgd_t *pgd; unsigned long next; + unsigned long addr = start; int err = 0; int nr = 0; @@ -167,7 +168,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end, if (err) break; } while (pgd++, addr = next, addr != end); - flush_cache_vmap(addr, end); + flush_cache_vmap(start, end); if (unlikely(err)) return err; -- cgit v1.2.2 From 54566b2c1594c2326a645a3551f9d989f7ba3c5e Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 4 Jan 2009 12:00:53 -0800 Subject: fs: symlink write_begin allocation context fix With the write_begin/write_end aops, page_symlink was broken because it could no longer pass a GFP_NOFS type mask into the point where the allocations happened. They are done in write_begin, which would always assume that the filesystem can be entered from reclaim. This bug could cause filesystem deadlocks. The funny thing with having a gfp_t mask there is that it doesn't really allow the caller to arbitrarily tinker with the context in which it can be called. It couldn't ever be GFP_ATOMIC, for example, because it needs to take the page lock. The only thing any callers care about is __GFP_FS anyway, so turn that into a single flag. Add a new flag for write_begin, AOP_FLAG_NOFS. Filesystems can now act on this flag in their write_begin function. Change __grab_cache_page to accept a nofs argument as well, to honour that flag (while we're there, change the name to grab_cache_page_write_begin which is more instructive and does away with random leading underscores). This is really a more flexible way to go in the end anyway -- if a filesystem happens to want any extra allocations aside from the pagecache ones in its write_begin function, it may now use GFP_KERNEL (rather than GFP_NOFS) for common case allocations (eg. ocfs2_alloc_write_ctxt, for a random example). [kosaki.motohiro@jp.fujitsu.com: fix ubifs] [kosaki.motohiro@jp.fujitsu.com: fix fuse] Signed-off-by: Nick Piggin Reviewed-by: KOSAKI Motohiro Cc: [2.6.28.x] Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton [ Cleaned up the calling convention: just pass in the AOP flags untouched to the grab_cache_page_write_begin() function. That just simplifies everybody, and may even allow future expansion of the logic. - Linus ] Signed-off-by: Linus Torvalds --- mm/filemap.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index f3e5f8944d17..f8c69273c37f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2140,19 +2140,24 @@ EXPORT_SYMBOL(generic_file_direct_write); * Find or create a page at the given pagecache position. Return the locked * page. This function is specifically for buffered writes. 
*/ -struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index) +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags) { int status; struct page *page; + gfp_t gfp_notmask = 0; + if (flags & AOP_FLAG_NOFS) + gfp_notmask = __GFP_FS; repeat: page = find_lock_page(mapping, index); if (likely(page)) return page; - page = page_cache_alloc(mapping); + page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); if (!page) return NULL; - status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); + status = add_to_page_cache_lru(page, mapping, index, + GFP_KERNEL & ~gfp_notmask); if (unlikely(status)) { page_cache_release(page); if (status == -EEXIST) @@ -2161,7 +2166,7 @@ repeat: } return page; } -EXPORT_SYMBOL(__grab_cache_page); +EXPORT_SYMBOL(grab_cache_page_write_begin); static ssize_t generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) -- cgit v1.2.2 From 7f5ff766a7babd72fc192125e12ef5570effff4c Mon Sep 17 00:00:00 2001 From: Dmitri Monakhov Date: Mon, 1 Dec 2008 14:34:56 -0800 Subject: kill suid bit only for regular files We don't have to do it because it is useless for non-regular files. In fact a block device may trigger this path without dentry->d_inode->i_mutex. (akpm: concerns were expressed (by me) about S_ISDIR inodes) Signed-off-by: Dmitri Monakhov Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- mm/filemap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index f3e5f8944d17..ed53ce876259 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1766,7 +1766,7 @@ int should_remove_suid(struct dentry *dentry) if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) kill |= ATTR_KILL_SGID; - if (unlikely(kill && !capable(CAP_FSETID))) + if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) return kill; return 0; -- cgit v1.2.2 From acfa4380efe77e290d3a96b11cd4c9f24f4fbb18 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Dec 2008 10:06:33 -0500 Subject: inode->i_op is never NULL We used to have a rather schizophrenic set of checks for NULL ->i_op even though it had been eliminated years ago. You'd need to go out of your way to set it to NULL explicitly _and_ a bunch of code would die on such inodes anyway. After killing two remaining places that still did that bogosity, all that crap can go away. Signed-off-by: Al Viro --- mm/memory.c | 4 ++-- mm/nommu.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 0a2010a9518c..7b9db658aca2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2266,7 +2266,7 @@ int vmtruncate(struct inode * inode, loff_t offset) unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); } - if (inode->i_op && inode->i_op->truncate) + if (inode->i_op->truncate) inode->i_op->truncate(inode); return 0; @@ -2286,7 +2286,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) * a way to truncate a range of blocks (punch a hole) - * we should return failure right now. 
*/ - if (!inode->i_op || !inode->i_op->truncate_range) + if (!inode->i_op->truncate_range) return -ENOSYS; mutex_lock(&inode->i_mutex); diff --git a/mm/nommu.c b/mm/nommu.c index 7695dc850785..1c28ea3a4e9c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -86,7 +86,7 @@ do_expand: i_size_write(inode, offset); out_truncate: - if (inode->i_op && inode->i_op->truncate) + if (inode->i_op->truncate) inode->i_op->truncate(inode); return 0; out_sig: -- cgit v1.2.2 From 4c728ef583b3d82266584da5cb068294c09df31e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 Dec 2008 21:11:15 +0100 Subject: add a vfs_fsync helper Fsync currently has a fdatawrite/fdatawait pair around the method call, and a mutex_lock/unlock of the inode mutex. All callers of fsync have to duplicate this, but we have a few and most of them don't quite get it right. This patch adds a new vfs_fsync that takes care of this. It's a little more complicated than usual, as ->fsync might get a NULL file pointer and just a dentry from nfsd, but otherwise gets a file and we want to take the mapping and file operations from it when it is there. Notes on the fsync callers: - ecryptfs wasn't calling filemap_fdatawrite / filemap_fdatawait on the lower file - coda wasn't calling filemap_fdatawrite / filemap_fdatawait on the host file, and returning 0 when ->fsync was missing - shm wasn't calling either filemap_fdatawrite / filemap_fdatawait nor taking i_mutex. Now given that shared memory doesn't have disk backing, not doing anything in fsync seems fine and I left it out of the vfs_fsync conversion for now, but in that case we might just not pass it through to the lower file at all but just call the no-op simple_sync_file directly. [and now actually export vfs_fsync] Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- mm/msync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/msync.c b/mm/msync.c index 144a7570535d..07dae08cf31c 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -82,7 +82,7 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags) (vma->vm_flags & VM_SHARED)) { get_file(file); up_read(&mm->mmap_sem); - error = do_fsync(file, 0); + error = vfs_fsync(file, file->f_path.dentry, 0); fput(file); if (error || start >= end) goto out; -- cgit v1.2.2 From 046c68842bce6b77509cf56e94a561029124b0ce Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 5 Jan 2009 14:06:29 +0000 Subject: mm: update my address Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- mm/mmap.c | 2 +- mm/mprotect.c | 2 +- mm/mremap.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index d4855a682ab6..2c778fcfd9bd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3,7 +3,7 @@ * * Written by obz. 
* - * Address space accounting code + * Address space accounting code */ #include diff --git a/mm/mprotect.c b/mm/mprotect.c index fded06f923f4..cfb4c4852062 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -4,7 +4,7 @@ * (C) Copyright 1994 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig * - * Address space accounting code + * Address space accounting code * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ diff --git a/mm/mremap.c b/mm/mremap.c index 58a2908f42f5..646de959aa58 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -3,7 +3,7 @@ * * (C) Copyright 1996 Linus Torvalds * - * Address space accounting code + * Address space accounting code * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ -- cgit v1.2.2 From 0211a9c8508b2183e0e539509aad60414f1c3813 Mon Sep 17 00:00:00 2001 From: Frederik Schwarzer Date: Mon, 29 Dec 2008 22:14:56 +0100 Subject: trivial: fix an -> a typos in documentation and comments It is always "an" if there is a vowel _spoken_ (not written). So it is: "an hour" (spoken vowel) but "a uniform" (spoken 'j') Signed-off-by: Frederik Schwarzer Signed-off-by: Jiri Kosina --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index f0e2892fe403..6392ae5cc6b1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2254,7 +2254,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) * Add some empty padding so that we can catch * overwrites from earlier objects rather than let * tracking information or the free pointer be - * corrupted if an user writes before the start + * corrupted if a user writes before the start * of the object. */ size += sizeof(void *); -- cgit v1.2.2 From 08fba69986e20c1c9e5fe2e6064d146cc4f42480 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 6 Jan 2009 14:38:53 -0800 Subject: mm: report the pagesize backing a VMA in /proc/pid/smaps It is useful to verify a hugepage-aware application is using the expected pagesizes for its memory regions. This patch creates an entry called KernelPageSize in /proc/pid/smaps that is the size of page used by the kernel to back a VMA. The entry is not called PageSize as it is possible the MMU uses a different size. This extension should not break any sensible parser that skips lines containing unrecognised information. Signed-off-by: Mel Gorman Acked-by: "KOSAKI Motohiro" Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6058b53dcb89..5cb8bc7c80f7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -219,6 +219,22 @@ static pgoff_t vma_hugecache_offset(struct hstate *h, (vma->vm_pgoff >> huge_page_order(h)); } +/* + * Return the size of the pages allocated when backing a VMA. In the majority + * cases this will be same size as used by the page table entries. + */ +unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) +{ + struct hstate *hstate; + + if (!is_vm_hugetlb_page(vma)) + return PAGE_SIZE; + + hstate = hstate_vma(vma); + + return 1UL << (hstate->order + PAGE_SHIFT); +} + /* * Flags for MAP_PRIVATE reservations. 
These are stored in the bottom * bits of the reservation map pointer, which are always clear due to -- cgit v1.2.2 From 3340289ddf29ca75c3acfb3a6b72f234b2f74d5c Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 6 Jan 2009 14:38:54 -0800 Subject: mm: report the MMU pagesize in /proc/pid/smaps The KernelPageSize entry in /proc/pid/smaps is the pagesize used by the kernel to back a VMA. This matches the size used by the MMU in the majority of cases. However, one counter-example occurs on PPC64 kernels whereby a kernel using 64K as a base pagesize may still use 4K pages for the MMU on older processors. To distinguish, this patch reports MMUPageSize as the pagesize used by the MMU in /proc/pid/smaps. Signed-off-by: Mel Gorman Cc: "KOSAKI Motohiro" Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5cb8bc7c80f7..9595278b5ab4 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -235,6 +235,19 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) return 1UL << (hstate->order + PAGE_SHIFT); } +/* + * Return the page size being used by the MMU to back a VMA. In the majority + * of cases, the page size used by the kernel matches the MMU size. On + * architectures where it differs, an architecture-specific version of this + * function is required. + */ +#ifndef vma_mmu_pagesize +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ + return vma_kernel_pagesize(vma); +} +#endif + /* * Flags for MAP_PRIVATE reservations. These are stored in the bottom * bits of the reservation map pointer, which are always clear due to -- cgit v1.2.2 From bf3f3bc5e734706730c12a323f9b2068052aa1f0 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:38:55 -0800 Subject: mm: don't mark_page_accessed in fault path Doing a mark_page_accessed at fault-time, then doing SetPageReferenced at unmap-time if the pte is young has a number of problems. mark_page_accessed is supposed to be roughly the equivalent of a young pte for unmapped references. Unfortunately it doesn't come with any context: after being called, reclaim doesn't know who or why the page was touched. So calling mark_page_accessed not only adds extra lru or PG_referenced manipulations for pages that are already going to have pte_young ptes anyway, but it also adds these references which are difficult to work with from the context of vma specific references (eg. MADV_SEQUENTIAL pte_young may not wish to contribute to the page being referenced). Then, simply doing SetPageReferenced when zapping a pte and finding it is young, is not a really good solution either. SetPageReferenced does not correctly promote the page to the active list for example. So after removing mark_page_accessed from the fault path, several mmap()+touch+munmap() cycles would have a very different result from several read(2) calls for example, which is not really desirable. Signed-off-by: Nick Piggin Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 1 - mm/memory.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index f5769b4dc075..f9d88183f697 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1530,7 +1530,6 @@ retry_find: /* * Found the page and have a reference on it. 
*/ - mark_page_accessed(page); ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT; vmf->page = page; return ret | VM_FAULT_LOCKED; diff --git a/mm/memory.c b/mm/memory.c index 7b9db658aca2..5e0e91cc6b67 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -768,7 +768,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (pte_dirty(ptent)) set_page_dirty(page); if (pte_young(ptent)) - SetPageReferenced(page); + mark_page_accessed(page); file_rss--; } page_remove_rmap(page, vma); -- cgit v1.2.2 From 390722baa7fc447b0a4f0c3c3f537ed056dbc944 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:38:57 -0800 Subject: mm: don't mark_page_accessed in shmem_fault Following "mm: don't mark_page_accessed in fault path", which now places a mark_page_accessed() in zap_pte_range(), we should remove the mark_page_accessed() from shmem_fault(). Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index f1b0d4871f3a..24f18fdee6e3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1444,7 +1444,6 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (error) return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); - mark_page_accessed(vmf->page); return ret | VM_FAULT_LOCKED; } -- cgit v1.2.2 From 3140a2273009c01c27d316f35ab76a37e105fdd8 Mon Sep 17 00:00:00 2001 From: Brice Goglin Date: Tue, 6 Jan 2009 14:38:57 -0800 Subject: mm: rework do_pages_move() to work on page_sized chunks Rework do_pages_move() to work by page-sized chunks of struct page_to_node that are passed to do_move_page_to_node_array(). We now only have to allocate a single page instead of a possibly very large vmalloc area to store all page_to_node entries. As a result, new_page_node() will now have a very small lookup, hiding much of the overall sys_move_pages() overhead. Signed-off-by: Brice Goglin Signed-off-by: Nathalie Furmento Acked-by: Christoph Lameter Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 79 +++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 35 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 21631ab8c08b..0a75716cb736 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -919,41 +919,43 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, const int __user *nodes, int __user *status, int flags) { - struct page_to_node *pm = NULL; + struct page_to_node *pm; nodemask_t task_nodes; - int err = 0; - int i; + unsigned long chunk_nr_pages; + unsigned long chunk_start; + int err; task_nodes = cpuset_mems_allowed(task); - /* Limit nr_pages so that the multiplication may not overflow */ - if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) { - err = -E2BIG; - goto out; - } - - pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node)); - if (!pm) { - err = -ENOMEM; + err = -ENOMEM; + pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); + if (!pm) goto out; - } - /* - * Get parameters from user space and initialize the pm - * array. Return various errors if the user did something wrong. 
+ * Store a chunk of page_to_node array in a page, + * but keep the last one as a marker */ - for (i = 0; i < nr_pages; i++) { - const void __user *p; + chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; - err = -EFAULT; - if (get_user(p, pages + i)) - goto out_pm; + for (chunk_start = 0; + chunk_start < nr_pages; + chunk_start += chunk_nr_pages) { + int j; - pm[i].addr = (unsigned long)p; - if (nodes) { + if (chunk_start + chunk_nr_pages > nr_pages) + chunk_nr_pages = nr_pages - chunk_start; + + /* fill the chunk pm with addrs and nodes from user-space */ + for (j = 0; j < chunk_nr_pages; j++) { + const void __user *p; int node; - if (get_user(node, nodes + i)) + err = -EFAULT; + if (get_user(p, pages + j + chunk_start)) + goto out_pm; + pm[j].addr = (unsigned long) p; + + if (get_user(node, nodes + j + chunk_start)) goto out_pm; err = -ENODEV; @@ -964,22 +966,29 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, if (!node_isset(node, task_nodes)) goto out_pm; - pm[i].node = node; - } else - pm[i].node = 0; /* anything to not match MAX_NUMNODES */ - } - /* End marker */ - pm[nr_pages].node = MAX_NUMNODES; + pm[j].node = node; + } + + /* End marker for this chunk */ + pm[chunk_nr_pages].node = MAX_NUMNODES; + + /* Migrate this chunk */ + err = do_move_page_to_node_array(mm, pm, + flags & MPOL_MF_MOVE_ALL); + if (err < 0) + goto out_pm; - err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL); - if (err >= 0) /* Return status information */ - for (i = 0; i < nr_pages; i++) - if (put_user(pm[i].status, status + i)) + for (j = 0; j < chunk_nr_pages; j++) + if (put_user(pm[j].status, status + j + chunk_start)) { err = -EFAULT; + goto out_pm; + } + } + err = 0; out_pm: - vfree(pm); + free_page((unsigned long)pm); out: return err; } -- cgit v1.2.2 From 5bd1455c239672081d0e7f086e899b8cbc7a9844 Mon Sep 17 00:00:00 2001 From: Brice Goglin Date: Tue, 6 Jan 2009 14:38:58 -0800 Subject: mm: move_pages: no need to set pp->page to ZERO_PAGE(0) by default pp->page is never used when not set to the right page, so there is no need to set it to ZERO_PAGE(0) by default. Signed-off-by: Brice Goglin Acked-by: Christoph Lameter Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 0a75716cb736..60510306c44a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -848,12 +848,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm, struct vm_area_struct *vma; struct page *page; - /* - * A valid page pointer that will not match any of the - * pages that will be moved. - */ - pp->page = ZERO_PAGE(0); - err = -EFAULT; vma = find_vma(mm, pp->addr); if (!vma || !vma_migratable(vma)) -- cgit v1.2.2 From 1c0fe6e3bda0464728c23c8d84aa47567e8b716c Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:38:59 -0800 Subject: mm: invoke oom-killer from page fault Rather than have the pagefault handler kill a process directly if it gets a VM_FAULT_OOM, have it call into the OOM killer. With increasingly sophisticated oom behaviour (cpusets, memory cgroups, oom killing throttling, oom priority adjustment or selective disabling, panic on oom, etc), it's silly to unconditionally kill the faulting process at page fault time. Create a hook for pagefault oom path to call into instead. Only converted x86 and uml so far. 
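A minimal sketch of what a converted architecture fault handler might look like (a hypothetical skeleton under stated assumptions, not lifted verbatim from the x86 or uml conversions):

	/* arch page fault handler, after handle_mm_fault() has returned */
	if (fault & VM_FAULT_OOM) {
		up_read(&mm->mmap_sem);
		/* let the OOM killer pick a victim instead of killing current */
		pagefault_out_of_memory();
		return;
	}

The hook itself, pagefault_out_of_memory(), is added to mm/oom_kill.c below.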
[akpm@linux-foundation.org: make __out_of_memory() static] [akpm@linux-foundation.org: fix comment] Signed-off-by: Nick Piggin Cc: Jeff Dike Acked-by: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 94 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 558f9afe6e4e..c592965dab2f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -509,6 +509,69 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) spin_unlock(&zone_scan_mutex); } +/* + * Must be called with tasklist_lock held for read. + */ +static void __out_of_memory(gfp_t gfp_mask, int order) +{ + if (sysctl_oom_kill_allocating_task) { + oom_kill_process(current, gfp_mask, order, 0, NULL, + "Out of memory (oom_kill_allocating_task)"); + + } else { + unsigned long points; + struct task_struct *p; + +retry: + /* + * Rambo mode: Shoot down a process and hope it solves whatever + * issues we may have. + */ + p = select_bad_process(&points, NULL); + + if (PTR_ERR(p) == -1UL) + return; + + /* Found nothing?!?! Either we hang forever, or we panic. */ + if (!p) { + read_unlock(&tasklist_lock); + panic("Out of memory and no killable processes...\n"); + } + + if (oom_kill_process(p, gfp_mask, order, points, NULL, + "Out of memory")) + goto retry; + } +} + +/* + * pagefault handler calls into here because it is out of memory but + * doesn't know exactly how or why. + */ +void pagefault_out_of_memory(void) +{ + unsigned long freed = 0; + + blocking_notifier_call_chain(&oom_notify_list, 0, &freed); + if (freed > 0) + /* Got some memory back in the last second. */ + return; + + if (sysctl_panic_on_oom) + panic("out of memory from page fault. panic_on_oom is selected.\n"); + + read_lock(&tasklist_lock); + __out_of_memory(0, 0); /* unknown gfp_mask and order */ + read_unlock(&tasklist_lock); + + /* + * Give "p" a good chance of killing itself before we + * retry to allocate memory. + */ + if (!test_thread_flag(TIF_MEMDIE)) + schedule_timeout_uninterruptible(1); +} + /** * out_of_memory - kill the "best" process when we run out of memory * @zonelist: zonelist pointer @@ -522,8 +585,6 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) */ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { - struct task_struct *p; - unsigned long points = 0; unsigned long freed = 0; enum oom_constraint constraint; @@ -544,7 +605,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) switch (constraint) { case CONSTRAINT_MEMORY_POLICY: - oom_kill_process(current, gfp_mask, order, points, NULL, + oom_kill_process(current, gfp_mask, order, 0, NULL, "No available memory (MPOL_BIND)"); break; @@ -553,35 +614,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) panic("out of memory. panic_on_oom is selected\n"); /* Fall-through */ case CONSTRAINT_CPUSET: - if (sysctl_oom_kill_allocating_task) { - oom_kill_process(current, gfp_mask, order, points, NULL, - "Out of memory (oom_kill_allocating_task)"); - break; - } -retry: - /* - * Rambo mode: Shoot down a process and hope it solves whatever - * issues we may have. - */ - p = select_bad_process(&points, NULL); - - if (PTR_ERR(p) == -1UL) - goto out; - - /* Found nothing?!?! Either we hang forever, or we panic. 
*/ - if (!p) { - read_unlock(&tasklist_lock); - panic("Out of memory and no killable processes...\n"); - } - - if (oom_kill_process(p, gfp_mask, order, points, NULL, - "Out of memory")) - goto retry; - + __out_of_memory(gfp_mask, order); break; } -out: read_unlock(&tasklist_lock); /* -- cgit v1.2.2 From c7d4caeb1d68d07f77cc09fc20b7759d6d7aa3b1 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jan 2009 14:39:00 -0800 Subject: oom: fix zone_scan_mutex name zone_scan_mutex is actually a spinlock, so name it appropriately. Signed-off-by: David Rientjes Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index c592965dab2f..e5f50cfdca4d 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -31,7 +31,7 @@ int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks; -static DEFINE_SPINLOCK(zone_scan_mutex); +static DEFINE_SPINLOCK(zone_scan_lock); /* #define DEBUG */ /** @@ -470,7 +470,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask) struct zone *zone; int ret = 1; - spin_lock(&zone_scan_mutex); + spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { if (zone_is_oom_locked(zone)) { ret = 0; @@ -480,7 +480,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask) for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { /* - * Lock each zone in the zonelist under zone_scan_mutex so a + * Lock each zone in the zonelist under zone_scan_lock so a * parallel invocation of try_set_zone_oom() doesn't succeed * when it shouldn't. */ @@ -488,7 +488,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask) } out: - spin_unlock(&zone_scan_mutex); + spin_unlock(&zone_scan_lock); return ret; } @@ -502,11 +502,11 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) struct zoneref *z; struct zone *zone; - spin_lock(&zone_scan_mutex); + spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { zone_clear_flag(zone, ZONE_OOM_LOCKED); } - spin_unlock(&zone_scan_mutex); + spin_unlock(&zone_scan_lock); } /* -- cgit v1.2.2 From 75aa199410359dc5fbcf9025ff7af98a9d20f0d5 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jan 2009 14:39:01 -0800 Subject: oom: print triggering task's cpuset and mems allowed When cpusets are enabled, it's necessary to print the triggering task's set of allowable nodes so the subsequently printed meminfo can be interpreted correctly. We also print the task's cpuset name for informational purposes. 
[rientjes@google.com: task lock current before dereferencing cpuset] Cc: Paul Menage Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index e5f50cfdca4d..6b9e758c98a5 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -392,6 +392,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, printk(KERN_WARNING "%s invoked oom-killer: " "gfp_mask=0x%x, order=%d, oomkilladj=%d\n", current->comm, gfp_mask, order, current->oomkilladj); + task_lock(current); + cpuset_print_task_mems_allowed(current); + task_unlock(current); dump_stack(); show_mem(); if (sysctl_oom_dump_tasks) -- cgit v1.2.2 From 31a12666d8f0c22235297e1c1575f82061480029 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:04 -0800 Subject: mm: write_cache_pages cyclic fix In write_cache_pages, scanned == 1 is supposed to mean that cyclic writeback has circled through zero, thus we should not circle again. However it gets set to 1 after the first successful pagevec lookup. This leads to cases where not enough data gets written. Counterexample: file with first 10 pages dirty, writeback_index == 5, nr_to_write == 10. Then the 5 last pages will be found, and scanned will be set to 1, after writing those out, we will not cycle back to get the first 5. Rework this logic, now we'll always cycle unless we started off from index 0. When cycling, only write out as far as 1 page before the start page from the first cycle (so we don't write parts of the file twice). Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2970e35fd03f..eb277bdd4c5d 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -868,9 +868,10 @@ int write_cache_pages(struct address_space *mapping, int done = 0; struct pagevec pvec; int nr_pages; + pgoff_t uninitialized_var(writeback_index); pgoff_t index; pgoff_t end; /* Inclusive */ - int scanned = 0; + int cycled; int range_whole = 0; long nr_to_write = wbc->nr_to_write; @@ -881,14 +882,19 @@ int write_cache_pages(struct address_space *mapping, pagevec_init(&pvec, 0); if (wbc->range_cyclic) { - index = mapping->writeback_index; /* Start from prev offset */ + writeback_index = mapping->writeback_index; /* prev offset */ + index = writeback_index; + if (index == 0) + cycled = 1; + else + cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; - scanned = 1; + cycled = 1; /* ignore range_cyclic tests */ } retry: while (!done && (index <= end) && @@ -897,7 +903,6 @@ retry: min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { unsigned i; - scanned = 1; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -915,7 +920,11 @@ retry: continue; } - if (!wbc->range_cyclic && page->index > end) { + if (page->index > end) { + /* + * can't be range_cyclic (1st pass) because + * end == -1 in that case. 
+ */ done = 1; unlock_page(page); continue; } @@ -946,13 +955,15 @@ retry: pagevec_release(&pvec); cond_resched(); } - if (!scanned && !done) { + if (!cycled) { /* + * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ - scanned = 1; + cycled = 1; index = 0; + end = writeback_index - 1; goto retry; } if (!wbc->no_nrwrite_index_update) { -- cgit v1.2.2 From bd19e012f6fd3b7309689165ea865cbb7bb88c1e Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:06 -0800 Subject: mm: write_cache_pages early loop termination We'd like to break out of the loop early in many situations, however the existing code has been setting mapping->writeback_index past the final page in the pagevec lookup for cyclic writeback. This is a problem if we don't process all pages up to the final page. Currently the code mostly keeps writeback_index reasonable and hacks around this by not breaking out of the loop or writing pages outside the range in these cases. Keep track of a real "done index" that enables us to terminate the loop in a much more flexible manner. Needed by the subsequent patch to preserve writepage errors, and then further patches to break out of the loop early for other reasons. However there are no functional changes with this patch alone. Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index eb277bdd4c5d..01b9cb8ccf68 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -871,6 +871,7 @@ int write_cache_pages(struct address_space *mapping, pgoff_t uninitialized_var(writeback_index); pgoff_t index; pgoff_t end; /* Inclusive */ + pgoff_t done_index; int cycled; int range_whole = 0; long nr_to_write = wbc->nr_to_write; @@ -897,6 +898,7 @@ int write_cache_pages(struct address_space *mapping, cycled = 1; /* ignore range_cyclic tests */ } retry: + done_index = index; while (!done && (index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, @@ -906,6 +908,8 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + done_index = page->index + 1; + /* * At this point we hold neither mapping->tree_lock nor * lock on the page itself: the page may be truncated or @@ -968,7 +972,7 @@ retry: } if (!wbc->no_nrwrite_index_update) { if (wbc->range_cyclic || (range_whole && nr_to_write > 0)) - mapping->writeback_index = index; + mapping->writeback_index = done_index; wbc->nr_to_write = nr_to_write; } -- cgit v1.2.2 From 00266770b8b3a6a77f896ca501a0613739086832 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:06 -0800 Subject: mm: write_cache_pages writepage error fix In write_cache_pages, if ret signals a real error, but we still have some pages left in the pagevec, done would be set to 1, but the remaining pages would continue to be processed and ret would be overwritten in the process. It could easily be overwritten with success, and thus success will be returned even if there is an error. Thus the caller is told all writes succeeded, whereas in reality some did not. Fix this by bailing immediately if there is an error, and retaining the first error code. This is a data integrity bug. 
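A condensed sketch of the failure mode being fixed (an illustrative pseudo-structure of the old inner loop, not the exact kernel code):

	for (i = 0; i < nr_pages; i++) {
		ret = (*writepage)(page, wbc, data); /* -EIO on one page...       */
		if (ret)
			done = 1;                    /* stops the outer loop only; */
	}                                            /* this loop keeps going      */
	return ret;                                  /* a later success hides -EIO */

The fix below breaks out of the pagevec loop on the first real error, so the first error code is what the caller sees.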
Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 26 ++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 01b9cb8ccf68..2e847cdcad0e 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -944,12 +944,26 @@ retry: } ret = (*writepage)(page, wbc, data); - - if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { - unlock_page(page); - ret = 0; - } - if (ret || (--nr_to_write <= 0)) + if (unlikely(ret)) { + if (ret == AOP_WRITEPAGE_ACTIVATE) { + unlock_page(page); + ret = 0; + } else { + /* + * done_index is set past this page, + * so media errors will not choke + * background writeout for the entire + * file. This has consequences for + * range_cyclic semantics (ie. it may + * not be suitable for data integrity + * writeout). + */ + done = 1; + break; + } + } + + if (--nr_to_write <= 0) done = 1; if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; -- cgit v1.2.2 From 05fe478dd04e02fa230c305ab9b5616669821dd3 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:08 -0800 Subject: mm: write_cache_pages integrity fix In write_cache_pages, nr_to_write is heeded even for data-integrity syncs, so the function will return success after writing out nr_to_write pages, even if that was not sufficient to guarantee data integrity. The callers tend to set it to values that could break data integrity semantics easily in practice. For example, nr_to_write can be set to mapping->nr_pages * 2, however if a file has a single, dirty page, then fsync is called, subsequent pages might be concurrently added and dirtied, then write_cache_pages might write out two of these newly dirty pages, while not writing out the old page that should have been written out. Fix this by ignoring nr_to_write if it is a data integrity sync. This is a data integrity bug. The reason this has been done in the past is to avoid stalling sync operations behind page dirtiers. "If a file has one dirty page at offset 1000000000000000 then someone does an fsync() and someone else gets in first and starts madly writing pages at offset 0, we want to write that page at 1000000000000000. Somehow." What we do today is return success after an arbitrary number of pages are written, whether or not we have provided the data-integrity semantics that the caller has asked for. Even this doesn't actually fix all stall cases completely: in the above situation, if the file has a huge number of pages in pagecache (but not dirty), then mapping->nrpages is going to be huge, even if pages are being dirtied. This change does indeed make the possibility of long stalls larger, and that's not a good thing, but lying about data integrity is even worse. We have to either perform the sync, or return -ELINUXISLAME so at least the caller knows what has happened. There are subsequent competing approaches in the works to solve the stall problems properly, without compromising data integrity. 
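To make the race concrete, a sketch of the window the old limit left open (an assumed timeline reconstructed from the description above, not a trace):

	/* file has one old dirty page; nr_to_write == mapping->nrpages * 2 */
	fsync(fd);                 /* enters write_cache_pages()            */
	/* another task concurrently dirties two other pages of the file    */
	/* write_cache_pages() writes those two, nr_to_write reaches zero,  */
	/* and it returns success -- the old dirty page never reached disk  */

With the fix, nr_to_write is only honoured for WB_SYNC_NONE (background) writeback; data-integrity syncs keep going until every dirty page in the range has been written.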
Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 2 +- mm/page-writeback.c | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index f9d88183f697..9c5e6235cc74 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -210,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, int ret; struct writeback_control wbc = { .sync_mode = sync_mode, - .nr_to_write = mapping->nrpages * 2, + .nr_to_write = LONG_MAX, .range_start = start, .range_end = end, }; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2e847cdcad0e..5edca676e2c3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -963,8 +963,10 @@ retry: } } - if (--nr_to_write <= 0) - done = 1; + if (wbc->sync_mode == WB_SYNC_NONE) { + if (--wbc->nr_to_write <= 0) + done = 1; + } if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; done = 1; -- cgit v1.2.2 From 5a3d5c9813db56a75934eb1015367fda23a8b0b4 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:09 -0800 Subject: mm: write_cache_pages cleanups Get rid of some complex expressions from flow control statements, add a comment, remove some duplicate code. Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5edca676e2c3..c3fb38b1ea43 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -899,11 +899,14 @@ int write_cache_pages(struct address_space *mapping, } retry: done_index = index; - while (!done && (index <= end) && - (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, - PAGECACHE_TAG_DIRTY, - min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { - unsigned i; + while (!done && (index <= end)) { + int i; + + nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -919,7 +922,16 @@ retry: */ lock_page(page); + /* + * Page truncated or invalidated. We can freely skip it + * then, even for data integrity operations: the page + * has disappeared concurrently, so there could be no + * real expectation of this data interity operation + * even if there is now a new, dirty page at the same + * pagecache address. + */ if (unlikely(page->mapping != mapping)) { +continue_unlock: unlock_page(page); continue; } @@ -930,18 +942,15 @@ retry: * end == -1 in that case. 
*/ done = 1; - unlock_page(page); - continue; + goto continue_unlock; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || - !clear_page_dirty_for_io(page)) { - unlock_page(page); - continue; - } + !clear_page_dirty_for_io(page)) + goto continue_unlock; ret = (*writepage)(page, wbc, data); if (unlikely(ret)) { @@ -964,7 +973,8 @@ retry: } if (wbc->sync_mode == WB_SYNC_NONE) { - if (--wbc->nr_to_write <= 0) + wbc->nr_to_write--; + if (wbc->nr_to_write <= 0) done = 1; } if (wbc->nonblocking && bdi_write_congested(bdi)) { -- cgit v1.2.2 From 515f4a037fb9ab736f8bad733fcd2ffd350cf265 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:10 -0800 Subject: mm: write_cache_pages optimise page cleaning In write_cache_pages, if we get stuck behind another process that is cleaning pages, we will be forced to wait for them to finish, then perform our own writeout (if it was redirtied during the long wait), then wait for that. If a page under writeout is still clean, we can skip waiting for it (if we're part of a data integrity sync, we'll be waiting for all writeout pages afterwards, so we'll still be waiting for the other guy's write that's cleaned the page). Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c3fb38b1ea43..2e8c2b01d5d5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -945,11 +945,20 @@ continue_unlock: goto continue_unlock; } - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + if (PageWriteback(page)) { + if (wbc->sync_mode != WB_SYNC_NONE) + wait_on_page_writeback(page); + else + goto continue_unlock; + } - if (PageWriteback(page) || - !clear_page_dirty_for_io(page)) + BUG_ON(PageWriteback(page)); + if (!clear_page_dirty_for_io(page)) goto continue_unlock; ret = (*writepage)(page, wbc, data); -- cgit v1.2.2 From d5482cdf8a0aacb1e6468a97d5544f5829c8d8c4 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:11 -0800 Subject: mm: write_cache_pages terminate quickly Terminate the write_cache_pages loop upon encountering the first page past end, without locking the page. Pages cannot have their index change when we have a reference on them (truncate, eg truncate_inode_pages_range performs the same check without the page lock). Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2e8c2b01d5d5..0d986c13d473 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -911,15 +911,24 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; - done_index = page->index + 1; - /* - * At this point we hold neither mapping->tree_lock nor - * lock on the page itself: the page may be truncated or - * invalidated (changing page->mapping to NULL), or even - * swizzled back from swapper_space to tmpfs file - * mapping + * At this point, the page may be truncated or + * invalidated (changing page->mapping to NULL), or + * even swizzled back from swapper_space to tmpfs file + * mapping. 
However, page->index will not change + * because we have a reference on the page. */ + if (page->index > end) { + /* + * can't be range_cyclic (1st pass) because + * end == -1 in that case. + */ + done = 1; + break; + } + + done_index = page->index + 1; + lock_page(page); /* @@ -936,15 +945,6 @@ continue_unlock: continue; } - if (page->index > end) { - /* - * can't be range_cyclic (1st pass) because - * end == -1 in that case. - */ - done = 1; - goto continue_unlock; - } - if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; -- cgit v1.2.2 From 82fd1a9a8ced9607312b54859572bcc6211e8919 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 6 Jan 2009 14:39:11 -0800 Subject: mm: write_cache_pages more terminate quickly Now that we have the early-termination logic in place, it makes sense to bail out early in all other cases where done is set to 1. Signed-off-by: Nick Piggin Cc: Chris Mason Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0d986c13d473..08d2b960b294 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -983,12 +983,15 @@ continue_unlock: if (wbc->sync_mode == WB_SYNC_NONE) { wbc->nr_to_write--; - if (wbc->nr_to_write <= 0) + if (wbc->nr_to_write <= 0) { done = 1; + break; + } } if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; done = 1; + break; } } pagevec_release(&pvec); -- cgit v1.2.2 From c04fc586c1a480ba198f03ae7b6cbd7b57380b91 Mon Sep 17 00:00:00 2001 From: Gary Hade Date: Tue, 6 Jan 2009 14:39:14 -0800 Subject: mm: show node to memory section relationship with symlinks in sysfs Show node to memory section relationship with symlinks in sysfs Add /sys/devices/system/node/nodeX/memoryY symlinks for all the memory sections located on nodeX. For example: /sys/devices/system/node/node1/memory135 -> ../../memory/memory135 indicates that memory section 135 resides on node1. Also revises documentation to cover this change as well as updating Documentation/ABI/testing/sysfs-devices-memory to include descriptions of memory hotremove files 'phys_device', 'phys_index', and 'state' that were previously not described there. In addition to it always being a good policy to provide users with the maximum possible amount of physical location information for resources that can be hot-added and/or hot-removed, the following are some (but likely not all) of the user benefits provided by this change. Immediate: - Provides information needed to determine the specific node on which a defective DIMM is located. This will reduce system downtime when the node or defective DIMM is swapped out. - Prevents unintended onlining of a memory section that was previously offlined due to a defective DIMM. This could happen during node hot-add when the user or node hot-add assist script onlines _all_ offlined sections due to user or script inability to identify the specific memory sections located on the hot-added node. The consequences of reintroducing the defective memory could be ugly. - Provides information needed to vary the amount and distribution of memory on specific nodes for testing or debugging purposes. Future: - Will provide information needed to identify the memory sections that need to be offlined prior to physical removal of a specific node. Symlink creation during boot was tested on 2-node x86_64, 2-node ppc64, and 2-node ia64 systems. 
Symlink creation during physical memory hot-add tested on a 2-node x86_64 system. Signed-off-by: Gary Hade Signed-off-by: Badari Pulavarty Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b17371185468..2ba38bc07b47 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -216,7 +216,8 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) return 0; } -static int __meminit __add_section(struct zone *zone, unsigned long phys_start_pfn) +static int __meminit __add_section(int nid, struct zone *zone, + unsigned long phys_start_pfn) { int nr_pages = PAGES_PER_SECTION; int ret; @@ -234,7 +235,7 @@ static int __meminit __add_section(struct zone *zone, unsigned long phys_start_p if (ret < 0) return ret; - return register_new_memory(__pfn_to_section(phys_start_pfn)); + return register_new_memory(nid, __pfn_to_section(phys_start_pfn)); } #ifdef CONFIG_SPARSEMEM_VMEMMAP @@ -273,8 +274,8 @@ static int __remove_section(struct zone *zone, struct mem_section *ms) * call this function after deciding the zone to which to * add the new pages. */ -int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn, - unsigned long nr_pages) +int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, + unsigned long nr_pages) { unsigned long i; int err = 0; @@ -284,7 +285,7 @@ int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn, end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); for (i = start_sec; i <= end_sec; i++) { - err = __add_section(zone, i << PFN_SECTION_SHIFT); + err = __add_section(nid, zone, i << PFN_SECTION_SHIFT); /* * EEXIST is finally dealt with by ioresource collision -- cgit v1.2.2 From 5594c8c813d9e907ff55da7080d42653478b73e8 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 6 Jan 2009 14:39:14 -0800 Subject: mm: print out memmap number only if it is not zero Don't print the size of the zone's memmap array if it does not have one. Impact: cleanup Signed-off-by: Yinghai Lu Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d8ac01474563..2f644c3e3da3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3469,9 +3469,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; if (realsize >= memmap_pages) { realsize -= memmap_pages; - printk(KERN_DEBUG - " %s zone: %lu pages used for memmap\n", - zone_names[j], memmap_pages); + if (memmap_pages) + printk(KERN_DEBUG + " %s zone: %lu pages used for memmap\n", + zone_names[j], memmap_pages); } else printk(KERN_WARNING " %s zone: %lu pages exceeds realsize %lu\n", -- cgit v1.2.2 From 1b0bd118862cd9fe9ac2872137a1b8107e83ff9d Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:15 -0800 Subject: mm: get rid of pagevec_release_nonlru() The speculative page references patch (commit e286781d5f2e9c846e012a39653a166e9d31777d) removed the last pagevec_release_nonlru() caller, so this function can be removed now. This patch doesn't have any functional change.
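For context, the surviving helper covers the same use. A minimal sketch of the usual pagevec pattern of this era's API (typical usage, not code from this patch; mapping is assumed to be an address_space the caller already holds):

	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned i, nr;

	pagevec_init(&pvec, 0);
	while ((nr = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))) {
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;
			/* ... operate on page; a reference is held for us ... */
		}
		/* drops the references and reinitialises pvec, LRU or not */
		pagevec_release(&pvec);
	}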
Signed-off-by: KOSAKI Motohiro Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'mm') diff --git a/mm/swap.c b/mm/swap.c index b135ec90cdeb..21a566fc4570 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -397,28 +397,6 @@ void __pagevec_release(struct pagevec *pvec) EXPORT_SYMBOL(__pagevec_release); -/* - * pagevec_release() for pages which are known to not be on the LRU - * - * This function reinitialises the caller's pagevec. - */ -void __pagevec_release_nonlru(struct pagevec *pvec) -{ - int i; - struct pagevec pages_to_free; - - pagevec_init(&pages_to_free, pvec->cold); - for (i = 0; i < pagevec_count(pvec); i++) { - struct page *page = pvec->pages[i]; - - VM_BUG_ON(PageLRU(page)); - if (put_page_testzero(page)) - pagevec_add(&pages_to_free, page); - } - pagevec_free(&pages_to_free); - pagevec_reinit(pvec); -} - /* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. -- cgit v1.2.2 From 64cdd548ffe26849d4cd113ac640f60606063b14 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:16 -0800 Subject: mm: cleanup: remove #ifdef CONFIG_MIGRATION #ifdefs in *.c files decrease source readability a bit; removing them is better. This patch doesn't have any functional change. Signed-off-by: KOSAKI Motohiro Cc: Christoph Lameter Cc: Mel Gorman Cc: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mprotect.c | 6 ++---- mm/rmap.c | 10 +++------- 2 files changed, 5 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/mprotect.c b/mm/mprotect.c index cfb4c4852062..d0f6e7ce09f1 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -59,8 +60,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd, ptent = pte_mkwrite(ptent); ptep_modify_prot_commit(mm, addr, pte, ptent); -#ifdef CONFIG_MIGRATION - } else if (!pte_file(oldpte)) { + } else if (PAGE_MIGRATION && !pte_file(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); if (is_write_migration_entry(entry)) { @@ -72,9 +72,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd, set_pte_at(mm, addr, pte, swp_entry_to_pte(entry)); } -#endif } - } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); diff --git a/mm/rmap.c b/mm/rmap.c index 10993942d6c9..53c56dacd725 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -50,6 +50,7 @@ #include #include #include +#include #include @@ -818,8 +819,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, spin_unlock(&mmlist_lock); } dec_mm_counter(mm, anon_rss); -#ifdef CONFIG_MIGRATION - } else { + } else if (PAGE_MIGRATION) { /* * Store the pfn of the page in a special migration * pte.
do_swap_page() will wait until the migration @@ -827,19 +827,15 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, */ BUG_ON(!migration); entry = make_migration_entry(page, pte_write(pteval)); -#endif } set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); BUG_ON(pte_file(*pte)); - } else -#ifdef CONFIG_MIGRATION - if (migration) { + } else if (PAGE_MIGRATION && migration) { /* Establish migration entry for a file page */ swp_entry_t entry; entry = make_migration_entry(page, pte_write(pteval)); set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); } else -#endif dec_mm_counter(mm, file_rss); -- cgit v1.2.2 From 4917e5d0499b5ae7b26b56fccaefddf9aec9369c Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 6 Jan 2009 14:39:17 -0800 Subject: mm: more likely reclaim MADV_SEQUENTIAL mappings File pages mapped only in sequentially read mappings are perfect reclaim candidates. This patch makes these mappings behave like weak references: their pages will be reclaimed unless they have a strong reference from a normal mapping as well. It changes the reclaim and the unmap path where they check if the page has been referenced. In both cases, accesses through sequentially read mappings will be ignored. Benchmark results from KOSAKI Motohiro: http://marc.info/?l=linux-mm&m=122485301925098&w=2 Signed-off-by: Johannes Weiner Signed-off-by: Rik van Riel Acked-by: KOSAKI Motohiro Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 3 ++- mm/rmap.c | 13 +++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 5e0e91cc6b67..99e8d5c7b312 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -767,7 +767,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, else { if (pte_dirty(ptent)) set_page_dirty(page); - if (pte_young(ptent)) + if (pte_young(ptent) && + likely(!VM_SequentialReadHint(vma))) mark_page_accessed(page); file_rss--; } diff --git a/mm/rmap.c b/mm/rmap.c index 53c56dacd725..f01e92244c53 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -360,8 +360,17 @@ static int page_referenced_one(struct page *page, goto out_unmap; } - if (ptep_clear_flush_young_notify(vma, address, pte)) - referenced++; + if (ptep_clear_flush_young_notify(vma, address, pte)) { + /* + * Don't treat a reference through a sequentially read + * mapping as such. If the page has been used in + * another mapping, we will catch it; if this other + * mapping is already gone, the unmap path will have + * set PG_referenced or activated the page. + */ + if (likely(!VM_SequentialReadHint(vma))) + referenced++; + } /* Pretend the page is referenced if the task has the swap token and is in the middle of a page fault. */ -- cgit v1.2.2 From c1279c4ef37a06ba708e6b1f6fd98b45c52770f6 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 6 Jan 2009 14:39:18 -0800 Subject: mm: vmalloc tweak failure printk If we can't service a vmalloc allocation, show the size of the allocation that actually failed. Useful for debugging.
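With the %lu added (see the diff below), a failing allocation now logs its size; a plausible example of the resulting message, with a hypothetical size value:

	vmap allocation for size 1048576 failed: use vmalloc= to increase size.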
Signed-off-by: Glauber Costa Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7465f22fec0c..2644afb9d6ab 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -381,8 +381,9 @@ found: goto retry; } if (printk_ratelimit()) - printk(KERN_WARNING "vmap allocation failed: " - "use vmalloc= to increase size.\n"); + printk(KERN_WARNING + "vmap allocation for size %lu failed: " + "use vmalloc= to increase size.\n", size); return ERR_PTR(-EBUSY); } -- cgit v1.2.2 From 848778483351e90f9a2c587bdbe0c78b17c1e30b Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 6 Jan 2009 14:39:19 -0800 Subject: mm: vmalloc improve vmallocinfo If the generic __vmalloc() wrappers are used, output of files like /proc/vmallocinfo will show things like "vmalloc_32" or "vmalloc_user", whichever wrapper made the call, as the caller. This info is not as useful as the real caller of the allocation. So the proposal is to call __vmalloc_node directly, with matching parameters, to preserve the caller information. Signed-off-by: Glauber Costa Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2644afb9d6ab..b62ea569aa43 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1376,7 +1376,8 @@ void *vmalloc_user(unsigned long size) struct vm_struct *area; void *ret; - ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + PAGE_KERNEL, -1, __builtin_return_address(0)); if (ret) { area = find_vm_area(ret); area->flags |= VM_USERMAP; @@ -1421,7 +1422,8 @@ EXPORT_SYMBOL(vmalloc_node); void *vmalloc_exec(unsigned long size) { - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC); + return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, + -1, __builtin_return_address(0)); } #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) @@ -1441,7 +1443,8 @@ void *vmalloc_exec(unsigned long size) */ void *vmalloc_32(unsigned long size) { - return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL); + return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, + -1, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32); @@ -1457,7 +1460,8 @@ void *vmalloc_32_user(unsigned long size) struct vm_struct *area; void *ret; - ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL); + ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, + -1, __builtin_return_address(0)); if (ret) { area = find_vm_area(ret); area->flags |= VM_USERMAP; -- cgit v1.2.2 From e97a630eb0f5b8b380fd67504de6cedebb489003 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:19 -0800 Subject: mm: vmalloc use mutex for purge The vmalloc purge lock can be a mutex so we can sleep while a purge is going on (purge involves a global kernel TLB invalidate, so it can take quite a while).
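The locking pattern involved, condensed from the diff below (purge_lock is the static lock inside __purge_vmap_area_lazy; body elided):

	static DEFINE_MUTEX(purge_lock);

	if (!sync && !force_flush) {
		/* opportunistic purge: if someone else is purging, just skip */
		if (!mutex_trylock(&purge_lock))
			return;
	} else
		mutex_lock(&purge_lock);	/* may now sleep across the long TLB flush */
	/* ... walk and free lazily-unmapped areas, flush TLB ... */
	mutex_unlock(&purge_lock);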
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b62ea569aa43..78689cba178f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -473,7 +474,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush) { - static DEFINE_SPINLOCK(purge_lock); + static DEFINE_MUTEX(purge_lock); LIST_HEAD(valist); struct vmap_area *va; int nr = 0; @@ -484,10 +485,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, * the case that isn't actually used at the moment anyway. */ if (!sync && !force_flush) { - if (!spin_trylock(&purge_lock)) + if (!mutex_trylock(&purge_lock)) return; } else - spin_lock(&purge_lock); + mutex_lock(&purge_lock); rcu_read_lock(); list_for_each_entry_rcu(va, &vmap_area_list, list) { @@ -519,7 +520,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, __free_vmap_area(va); spin_unlock(&vmap_area_lock); } - spin_unlock(&purge_lock); + mutex_unlock(&purge_lock); } /* -- cgit v1.2.2 From cd52858c73f9f7df859a08fb08496ca39b9b3d8d Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:39:20 -0800 Subject: mm: vmalloc make lazy unmapping configurable Lazy unmapping in the vmalloc code has now opened the possibility for use after free bugs to go undetected. We can catch those by forcing an unmap and flush (which is going to be slow, but that's what happens). Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 78689cba178f..c5db9a7264d9 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -434,6 +434,27 @@ static void unmap_vmap_area(struct vmap_area *va) vunmap_page_range(va->va_start, va->va_end); } +static void vmap_debug_free_range(unsigned long start, unsigned long end) +{ + /* + * Unmap page tables and force a TLB flush immediately if + * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free + * bugs similarly to those in linear kernel virtual address + * space after a page has been freed. + * + * All the lazy freeing logic is still retained, in order to + * minimise intrusiveness of this debugging feature. + * + * This is going to be *slow* (linear kernel virtual address + * debugging doesn't do a broadcast TLB flush so it is a lot + * faster). + */ +#ifdef CONFIG_DEBUG_PAGEALLOC + vunmap_page_range(start, end); + flush_tlb_kernel_range(start, end); +#endif +} + /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. 
@@ -914,6 +935,7 @@ void vm_unmap_ram(const void *mem, unsigned int count) BUG_ON(addr & (PAGE_SIZE-1)); debug_check_no_locks_freed(mem, size); + vmap_debug_free_range(addr, addr+size); if (likely(count <= VMAP_MAX_ALLOC)) vb_free(mem, size); @@ -1130,6 +1152,8 @@ struct vm_struct *remove_vm_area(const void *addr) if (va && va->flags & VM_VM_AREA) { struct vm_struct *vm = va->private; struct vm_struct *tmp, **p; + + vmap_debug_free_range(va->va_start, va->va_end); free_unmap_vmap_area(va); vm->size -= PAGE_SIZE; -- cgit v1.2.2 From 38e0edb15bd07c6a0caf0cfe39f8f90bd98601b2 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 6 Jan 2009 14:39:21 -0800 Subject: mm/apply_to_range: call pte function with lazy updates Make the pte-level function in apply_to_range be called in lazy mmu mode, so that any pagetable modifications can be batched. Signed-off-by: Jeremy Fitzhardinge Cc: Johannes Weiner Cc: Nick Piggin Cc: Venkatesh Pallipadi Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 99e8d5c7b312..b5af358b8b22 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1645,6 +1645,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, BUG_ON(pmd_huge(*pmd)); + arch_enter_lazy_mmu_mode(); + token = pmd_pgtable(*pmd); do { @@ -1653,6 +1655,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, break; } while (pte++, addr += PAGE_SIZE, addr != end); + arch_leave_lazy_mmu_mode(); + if (mm != &init_mm) pte_unmap_unlock(pte-1, ptl); return err; -- cgit v1.2.2 From 3c1d43787b48c798f44dc32a6e6deb5ca2da3e68 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:23 -0800 Subject: mm: remove GFP_HIGHUSER_PAGECACHE GFP_HIGHUSER_PAGECACHE is just an alias for GFP_HIGHUSER_MOVABLE, making that harder to track down: remove it, and its out-of-work brothers GFP_NOFS_PAGECACHE and GFP_USER_PAGECACHE. Since we're making that improvement to hotremove_migrate_alloc(), I think we can now also remove one of the "o"s from its comment. Signed-off-by: Hugh Dickins Acked-by: Mel Gorman Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2ba38bc07b47..c083cf5fd6df 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -627,15 +627,12 @@ int scan_lru_pages(unsigned long start, unsigned long end) } static struct page * -hotremove_migrate_alloc(struct page *page, - unsigned long private, - int **x) +hotremove_migrate_alloc(struct page *page, unsigned long private, int **x) { - /* This should be improoooooved!! */ - return alloc_page(GFP_HIGHUSER_PAGECACHE); + /* This should be improooooved!! */ + return alloc_page(GFP_HIGHUSER_MOVABLE); } - #define NR_OFFLINE_AT_ONCE_PAGES (256) static int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) -- cgit v1.2.2 From 6d91add09f4bad5f4d4233b13faa392f0c4b16be Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:24 -0800 Subject: mm: add Set,ClearPageSwapCache stubs If we add NOOP stubs for SetPageSwapCache() and ClearPageSwapCache(), then we can remove the #ifdef CONFIG_SWAPs from mm/migrate.c. 
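A sketch of the kind of stubs this depends on, following the PAGEFLAG_FALSE() convention of include/linux/page-flags.h (the exact form used by the companion include/ patch is assumed here):

	#ifdef CONFIG_SWAP
	PAGEFLAG(SwapCache, swapcache)
	#else
	PAGEFLAG_FALSE(SwapCache)	/* PageSwapCache() is 0; Set/Clear are no-ops */
	#endif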
Signed-off-by: Hugh Dickins Acked-by: Christoph Lameter Cc: Nick Piggin Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 60510306c44a..55373983c9c6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -300,12 +300,10 @@ static int migrate_page_move_mapping(struct address_space *mapping, * Now we know that no one else is looking at the page. */ get_page(newpage); /* add cache reference */ -#ifdef CONFIG_SWAP if (PageSwapCache(page)) { SetPageSwapCache(newpage); set_page_private(newpage, page_private(page)); } -#endif radix_tree_replace_slot(pslot, newpage); @@ -373,9 +371,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page) mlock_migrate_page(newpage, page); -#ifdef CONFIG_SWAP ClearPageSwapCache(page); -#endif ClearPagePrivate(page); set_page_private(page, 0); /* page->mapping contains a flag for PageAnon() */ -- cgit v1.2.2 From 51726b1222863852c46ca21ed0115b85d1edfd89 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:25 -0800 Subject: mm: replace some BUG_ONs by VM_BUG_ONs The swap code is over-provisioned with BUG_ONs on assorted page flags, mostly dating back to 2.3. They're good documentation, and guard against developer error, but a waste of space on most systems: change them to VM_BUG_ONs, conditional on CONFIG_DEBUG_VM. Just delete the PagePrivate ones: they're later, from 2.5.69, but even less interesting now. Signed-off-by: Hugh Dickins Reviewed-by: Christoph Lameter Cc: Nick Piggin Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_io.c | 4 ++-- mm/swap_state.c | 19 +++++++++---------- mm/swapfile.c | 8 +++----- 3 files changed, 14 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/page_io.c b/mm/page_io.c index 065c4480eaf0..d277a80efa71 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -125,8 +125,8 @@ int swap_readpage(struct file *file, struct page *page) struct bio *bio; int ret = 0; - BUG_ON(!PageLocked(page)); - BUG_ON(PageUptodate(page)); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(PageUptodate(page)); bio = get_swap_bio(GFP_KERNEL, page_private(page), page, end_swap_bio_read); if (bio == NULL) { diff --git a/mm/swap_state.c b/mm/swap_state.c index 3353c9029cef..e793fdea275d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -72,10 +72,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) { int error; - BUG_ON(!PageLocked(page)); - BUG_ON(PageSwapCache(page)); - BUG_ON(PagePrivate(page)); - BUG_ON(!PageSwapBacked(page)); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(PageSwapCache(page)); + VM_BUG_ON(!PageSwapBacked(page)); + error = radix_tree_preload(gfp_mask); if (!error) { page_cache_get(page); @@ -108,10 +108,9 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) */ void __delete_from_swap_cache(struct page *page) { - BUG_ON(!PageLocked(page)); - BUG_ON(!PageSwapCache(page)); - BUG_ON(PageWriteback(page)); - BUG_ON(PagePrivate(page)); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageSwapCache(page)); + VM_BUG_ON(PageWriteback(page)); radix_tree_delete(&swapper_space.page_tree, page_private(page)); set_page_private(page, 0); @@ -134,8 +133,8 @@ int add_to_swap(struct page * page, gfp_t gfp_mask) swp_entry_t entry; int err; - BUG_ON(!PageLocked(page)); - BUG_ON(!PageUptodate(page)); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageUptodate(page)); for (;;) { entry = get_swap_page(); 
diff --git a/mm/swapfile.c b/mm/swapfile.c index 54a9f87e5162..214e90b94946 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -333,7 +333,7 @@ int can_share_swap_page(struct page *page) { int count; - BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageLocked(page)); count = page_mapcount(page); if (count <= 1 && PageSwapCache(page)) count += page_swapcount(page); @@ -350,8 +350,7 @@ static int remove_exclusive_swap_page_count(struct page *page, int count) struct swap_info_struct * p; swp_entry_t entry; - BUG_ON(PagePrivate(page)); - BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageLocked(page)); if (!PageSwapCache(page)) return 0; @@ -432,7 +431,6 @@ void free_swap_and_cache(swp_entry_t entry) if (page) { int one_user; - BUG_ON(PagePrivate(page)); one_user = (page_count(page) == 2); /* Only cache user (+us), or swap space full? Free it! */ /* Also recheck PageSwapCache after page is locked (above) */ @@ -1209,7 +1207,7 @@ int page_queue_congested(struct page *page) { struct backing_dev_info *bdi; - BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ + VM_BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ if (PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; -- cgit v1.2.2 From b5934c531849ff4a51ce0f290141efe564290e40 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:25 -0800 Subject: mm: add_active_or_unevictable into rmap lru_cache_add_active_or_unevictable() and page_add_new_anon_rmap() always appear together. Save some symbol table space and some jumping around by removing lru_cache_add_active_or_unevictable(), folding its code into page_add_new_anon_rmap(): like how we add file pages to lru just after adding them to page cache. Remove the nearby "TODO: is this safe?" comments (yes, it is safe), and change page_add_new_anon_rmap()'s address BUG_ON to VM_BUG_ON as originally intended. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Cc: Lee Schermerhorn Cc: Nick Piggin Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 6 ------ mm/rmap.c | 7 ++++++- mm/swap.c | 19 ------------------- 3 files changed, 6 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index b5af358b8b22..a138c50dc39a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1949,10 +1949,7 @@ gotten: */ ptep_clear_flush_notify(vma, address, page_table); SetPageSwapBacked(new_page); - lru_cache_add_active_or_unevictable(new_page, vma); page_add_new_anon_rmap(new_page, vma, address); - -//TODO: is this safe? do_anonymous_page() does it this way. set_pte_at(mm, address, page_table, entry); update_mmu_cache(vma, address, entry); if (old_page) { @@ -2448,7 +2445,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, goto release; inc_mm_counter(mm, anon_rss); SetPageSwapBacked(page); - lru_cache_add_active_or_unevictable(page, vma); page_add_new_anon_rmap(page, vma, address); set_pte_at(mm, address, page_table, entry); @@ -2597,7 +2593,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (anon) { inc_mm_counter(mm, anon_rss); SetPageSwapBacked(page); - lru_cache_add_active_or_unevictable(page, vma); page_add_new_anon_rmap(page, vma, address); } else { inc_mm_counter(mm, file_rss); @@ -2607,7 +2602,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, get_page(dirty_page); } } -//TODO: is this safe? do_anonymous_page() does it this way. 
set_pte_at(mm, address, page_table, entry); /* no need to invalidate: a not-present page won't be cached */ diff --git a/mm/rmap.c b/mm/rmap.c index f01e92244c53..10da68202b10 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -671,9 +672,13 @@ void page_add_anon_rmap(struct page *page, void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - BUG_ON(address < vma->vm_start || address >= vma->vm_end); + VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ __page_set_anon_rmap(page, vma, address); + if (page_evictable(page, vma)) + lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page)); + else + add_page_to_unevictable_list(page); } /** diff --git a/mm/swap.c b/mm/swap.c index 21a566fc4570..ff0b290475fd 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -246,25 +246,6 @@ void add_page_to_unevictable_list(struct page *page) spin_unlock_irq(&zone->lru_lock); } -/** - * lru_cache_add_active_or_unevictable - * @page: the page to be added to LRU - * @vma: vma in which page is mapped for determining reclaimability - * - * place @page on active or unevictable LRU list, depending on - * page_evictable(). Note that if the page is not evictable, - * it goes directly back onto it's zone's unevictable list. It does - * NOT use a per cpu pagevec. - */ -void lru_cache_add_active_or_unevictable(struct page *page, - struct vm_area_struct *vma) -{ - if (page_evictable(page, vma)) - lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page)); - else - add_page_to_unevictable_list(page); -} - /* * Drain pages out of the cpu's pagevecs. * Either "cpu" is the current CPU, and preemption has already been -- cgit v1.2.2 From 2afd1c928f1132b8d0099866e75ce8ad713a1180 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:26 -0800 Subject: mm: make page_lock_anon_vma() static page_lock_anon_vma() and page_unlock_anon_vma() were made available to show_page_path() in vmscan.c; but now that has been removed, make them static in rmap.c again, they're better kept private if possible. Signed-off-by: Hugh Dickins Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index 10da68202b10..892e1877366b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -193,7 +193,7 @@ void __init anon_vma_init(void) * Getting a lock on a stable anon_vma from a page off the LRU is * tricky: page_lock_anon_vma rely on RCU to guard against the races. */ -struct anon_vma *page_lock_anon_vma(struct page *page) +static struct anon_vma *page_lock_anon_vma(struct page *page) { struct anon_vma *anon_vma; unsigned long anon_mapping; @@ -213,7 +213,7 @@ out: return NULL; } -void page_unlock_anon_vma(struct anon_vma *anon_vma) +static void page_unlock_anon_vma(struct anon_vma *anon_vma) { spin_unlock(&anon_vma->lock); rcu_read_unlock(); -- cgit v1.2.2 From cbf84b7add8103b92aaa84928e335df726bfc8da Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:27 -0800 Subject: mm: further cleanup page_add_new_anon_rmap Moving lru_cache_add_active_or_unevictable() into page_add_new_anon_rmap() was good but stupid: we can and should SetPageSwapBacked() there too; and we know for sure that this anonymous, swap-backed page is not file cache. 
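As the rmap.c hunk below shows, the function after this cleanup reads roughly:

	void page_add_new_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
	{
		VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		SetPageSwapBacked(page);
		atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
		__page_set_anon_rmap(page, vma, address);
		if (page_evictable(page, vma))
			lru_cache_add_lru(page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(page);
	}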
Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Nick Piggin Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 3 --- mm/rmap.c | 6 +++--- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index a138c50dc39a..122d965e820f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1948,7 +1948,6 @@ gotten: * thread doing COW. */ ptep_clear_flush_notify(vma, address, page_table); - SetPageSwapBacked(new_page); page_add_new_anon_rmap(new_page, vma, address); set_pte_at(mm, address, page_table, entry); update_mmu_cache(vma, address, entry); @@ -2444,7 +2443,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_none(*page_table)) goto release; inc_mm_counter(mm, anon_rss); - SetPageSwapBacked(page); page_add_new_anon_rmap(page, vma, address); set_pte_at(mm, address, page_table, entry); @@ -2592,7 +2590,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (anon) { inc_mm_counter(mm, anon_rss); - SetPageSwapBacked(page); page_add_new_anon_rmap(page, vma, address); } else { inc_mm_counter(mm, file_rss); diff --git a/mm/rmap.c b/mm/rmap.c index 892e1877366b..b1770b11a571 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -673,10 +672,11 @@ void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); - atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ + SetPageSwapBacked(page); + atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ __page_set_anon_rmap(page, vma, address); if (page_evictable(page, vma)) - lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page)); + lru_cache_add_lru(page, LRU_ACTIVE_ANON); else add_page_to_unevictable_list(page); } -- cgit v1.2.2 From 58a01a45721bf7bd3a41a86248c3cb02a6b0c501 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 6 Jan 2009 14:39:28 -0800 Subject: mm/page_alloc.c: eliminate NULL test and memset after alloc_bootmem As noted by Akinobu Mita in patch b1fceac2b9e04d278316b2faddf276015fc06e3b, alloc_bootmem and related functions never return NULL and always return a zeroed region of memory. Thus a NULL test or memset after calls to these functions is unnecessary. This was fixed using the following semantic patch. (http://www.emn.fr/x-info/coccinelle/) // @@ expression E; statement S; @@ E = \(alloc_bootmem\|alloc_bootmem_low\|alloc_bootmem_pages\|alloc_bootmem_low_pages\|alloc_bootmem_node\|alloc_bootmem_low_pages_node\|alloc_bootmem_pages_node\)(...) ... when != E ( - BUG_ON (E == NULL); | - if (E == NULL) S ) @@ expression E,E1; @@ E = \(alloc_bootmem\|alloc_bootmem_low\|alloc_bootmem_pages\|alloc_bootmem_low_pages\|alloc_bootmem_node\|alloc_bootmem_low_pages_node\|alloc_bootmem_pages_node\)(...) ... 
when != E - memset(E,0,E1); // Signed-off-by: Julia Lawall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2f644c3e3da3..ce2a026219bc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3381,10 +3381,8 @@ static void __init setup_usemap(struct pglist_data *pgdat, { unsigned long usemapsize = usemap_size(zonesize); zone->pageblock_flags = NULL; - if (usemapsize) { + if (usemapsize) zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); - memset(zone->pageblock_flags, 0, usemapsize); - } } #else static void inline setup_usemap(struct pglist_data *pgdat, -- cgit v1.2.2 From 364aeb2849789b51bf4b9af2ddd02fee7285c54e Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jan 2009 14:39:29 -0800 Subject: mm: change dirty limit type specifiers to unsigned long The background dirty and dirty limits are better defined with type specifiers of unsigned long since negative writeback thresholds are not possible. These values, as returned by get_dirty_limits(), are normally compared with ZVC values to determine whether writeback shall commence or be throttled. Such page counts cannot be negative, so declaring the page limits as signed is unnecessary. Acked-by: Peter Zijlstra Cc: Dave Chinner Cc: Christoph Lameter Signed-off-by: David Rientjes Cc: Andrea Righi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/backing-dev.c | 6 +++--- mm/page-writeback.c | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 801c08b046e6..6f80beddd8a4 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -24,9 +24,9 @@ static void bdi_debug_init(void) static int bdi_debug_stats_show(struct seq_file *m, void *v) { struct backing_dev_info *bdi = m->private; - long background_thresh; - long dirty_thresh; - long bdi_thresh; + unsigned long background_thresh; + unsigned long dirty_thresh; + unsigned long bdi_thresh; get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 08d2b960b294..4d4074cff300 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -362,13 +362,13 @@ unsigned long determine_dirtyable_memory(void) } void -get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty, - struct backing_dev_info *bdi) +get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, + unsigned long *pbdi_dirty, struct backing_dev_info *bdi) { int background_ratio; /* Percentages */ int dirty_ratio; - long background; - long dirty; + unsigned long background; + unsigned long dirty; unsigned long available_memory = determine_dirtyable_memory(); struct task_struct *tsk; @@ -423,9 +423,9 @@ static void balance_dirty_pages(struct address_space *mapping) { long nr_reclaimable, bdi_nr_reclaimable; long nr_writeback, bdi_nr_writeback; - long background_thresh; - long dirty_thresh; - long bdi_thresh; + unsigned long background_thresh; + unsigned long dirty_thresh; + unsigned long bdi_thresh; unsigned long pages_written = 0; unsigned long write_chunk = sync_writeback_pages(); @@ -580,8 +580,8 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); void throttle_vm_writeout(gfp_t gfp_mask) { - long background_thresh; - long dirty_thresh; + unsigned long background_thresh; + unsigned long dirty_thresh; for ( ; ; ) { get_dirty_limits(&background_thresh, &dirty_thresh, 
NULL, NULL); @@ -624,8 +624,8 @@ static void background_writeout(unsigned long _min_pages) }; for ( ; ; ) { - long background_thresh; - long dirty_thresh; + unsigned long background_thresh; + unsigned long dirty_thresh; get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL); if (global_page_state(NR_FILE_DIRTY) + -- cgit v1.2.2 From 2da02997e08d3efe8174c7a47696e6f7cbe69ba9 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jan 2009 14:39:31 -0800 Subject: mm: add dirty_background_bytes and dirty_bytes sysctls This change introduces two new sysctls to /proc/sys/vm: dirty_background_bytes and dirty_bytes. dirty_background_bytes is the counterpart to dirty_background_ratio and dirty_bytes is the counterpart to dirty_ratio. With growing memory capacities of individual machines, it's no longer sufficient to specify dirty thresholds as a percentage of the amount of dirtyable memory over the entire system. dirty_background_bytes and dirty_bytes specify quantities of memory, in bytes, that represent the dirty limits for the entire system. If either of these values is set, its value represents the amount of dirty memory that is needed to commence either background or direct writeback. When a `bytes' or `ratio' file is written, its counterpart becomes a function of the written value. For example, if dirty_bytes is written to be 8192, 8K of memory is required to commence direct writeback. dirty_ratio is then functionally equivalent to 8K / the amount of dirtyable memory: dirtyable_memory = free pages + mapped pages + file cache dirty_background_bytes = dirty_background_ratio * dirtyable_memory -or- dirty_background_ratio = dirty_background_bytes / dirtyable_memory AND dirty_bytes = dirty_ratio * dirtyable_memory -or- dirty_ratio = dirty_bytes / dirtyable_memory Only one of dirty_background_bytes and dirty_background_ratio may be specified at a time, and only one of dirty_bytes and dirty_ratio may be specified. When one sysctl is written, the other appears as 0 when read. The `bytes' files operate on a page size granularity since dirty limits are compared with ZVC values, which are in page units. Prior to this change, the minimum dirty_ratio was 5 as implemented by get_dirty_limits() although /proc/sys/vm/dirty_ratio would show any user written value between 0 and 100. This restriction is maintained, but dirty_bytes has a lower limit of only one page. Also prior to this change, the dirty_background_ratio could not equal or exceed dirty_ratio. This restriction is maintained in addition to restricting dirty_background_bytes. If either background threshold equals or exceeds that of the dirty threshold, it is implicitly set to half the dirty threshold.
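A worked example, under assumed numbers: on a machine with 4GiB of dirtyable memory, writing 1073741824 to /proc/sys/vm/dirty_bytes (1GiB, i.e. 262144 pages at 4KiB) commences direct writeback at the same point as dirty_ratio = 25 would; afterwards, reading /proc/sys/vm/dirty_ratio reports 0, since only one of the pair is in effect at a time.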
Acked-by: Peter Zijlstra Cc: Dave Chinner Cc: Christoph Lameter Signed-off-by: David Rientjes Cc: Andrea Righi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 102 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 86 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 4d4074cff300..b493db7841dc 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -68,6 +68,12 @@ static inline long sync_writeback_pages(void) */ int dirty_background_ratio = 5; +/* + * dirty_background_bytes starts at 0 (disabled) so that it is a function of + * dirty_background_ratio * the amount of dirtyable memory + */ +unsigned long dirty_background_bytes; + /* * free highmem will not be subtracted from the total free memory * for calculating free ratios if vm_highmem_is_dirtyable is true @@ -79,6 +85,12 @@ int vm_highmem_is_dirtyable; */ int vm_dirty_ratio = 10; +/* + * vm_dirty_bytes starts at 0 (disabled) so that it is a function of + * vm_dirty_ratio * the amount of dirtyable memory + */ +unsigned long vm_dirty_bytes; + /* * The interval between `kupdate'-style writebacks, in jiffies */ @@ -135,23 +147,75 @@ static int calc_period_shift(void) { unsigned long dirty_total; - dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100; + if (vm_dirty_bytes) + dirty_total = vm_dirty_bytes / PAGE_SIZE; + else + dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / + 100; return 2 + ilog2(dirty_total - 1); } /* - * update the period when the dirty ratio changes. + * update the period when the dirty threshold changes. */ +static void update_completion_period(void) +{ + int shift = calc_period_shift(); + prop_change_shift(&vm_completions, shift); + prop_change_shift(&vm_dirties, shift); +} + +int dirty_background_ratio_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); + if (ret == 0 && write) + dirty_background_bytes = 0; + return ret; +} + +int dirty_background_bytes_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); + if (ret == 0 && write) + dirty_background_ratio = 0; + return ret; +} + int dirty_ratio_handler(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int old_ratio = vm_dirty_ratio; - int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); + int ret; + + ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { - int shift = calc_period_shift(); - prop_change_shift(&vm_completions, shift); - prop_change_shift(&vm_dirties, shift); + update_completion_period(); + vm_dirty_bytes = 0; + } + return ret; +} + + +int dirty_bytes_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int old_bytes = vm_dirty_bytes; + int ret; + + ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); + if (ret == 0 && write && vm_dirty_bytes != old_bytes) { + update_completion_period(); + vm_dirty_ratio = 0; } return ret; } @@ -365,23 +429,29 @@ void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, unsigned long *pbdi_dirty, struct backing_dev_info *bdi) { - int 
background_ratio; /* Percentages */ - int dirty_ratio; unsigned long background; unsigned long dirty; unsigned long available_memory = determine_dirtyable_memory(); struct task_struct *tsk; - dirty_ratio = vm_dirty_ratio; - if (dirty_ratio < 5) - dirty_ratio = 5; + if (vm_dirty_bytes) + dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); + else { + int dirty_ratio; - background_ratio = dirty_background_ratio; - if (background_ratio >= dirty_ratio) - background_ratio = dirty_ratio / 2; + dirty_ratio = vm_dirty_ratio; + if (dirty_ratio < 5) + dirty_ratio = 5; + dirty = (dirty_ratio * available_memory) / 100; + } + + if (dirty_background_bytes) + background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); + else + background = (dirty_background_ratio * available_memory) / 100; - background = (background_ratio * available_memory) / 100; - dirty = (dirty_ratio * available_memory) / 100; + if (background >= dirty) + background = dirty / 2; tsk = current; if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { background += background / 4; -- cgit v1.2.2 From 878b63ac889df706d01048f2c110e322ad2f996d Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:32 -0800 Subject: mm: gup persist for write permission do_wp_page()'s VM_FAULT_WRITE return value tells __get_user_pages() that COW has been done if necessary, though it may be leaving the pte without write permission - for the odd case of forced writing to a readonly vma for ptrace. At present GUP then retries the follow_page() without asking for write permission, to escape an endless loop when forced. But an application may be relying on GUP to guarantee a writable page which won't be COWed again when written from userspace, whereas a race here might leave a readonly pte in place? Change the VM_FAULT_WRITE handling to ask follow_page() for write permission again, except in that odd case of forced writing to a readonly vma. Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 122d965e820f..f594bb65a9f1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1264,9 +1264,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, * do_wp_page has broken COW when necessary, * even if maybe_mkwrite decided not to set * pte_write. We can thus safely do subsequent - * page lookups as if they were reads. + * page lookups as if they were reads. But only + * do so when looping for pte_write is futile: + * in some cases userspace may also be wanting + * to write to the gotten user page, which a + * read fault here might prevent (a readonly + * page might get reCOWed by userspace write). */ - if (ret & VM_FAULT_WRITE) + if ((ret & VM_FAULT_WRITE) && + !(vma->vm_flags & VM_WRITE)) foll_flags &= ~FOLL_WRITE; cond_resched(); -- cgit v1.2.2 From ab967d86015a19777955370deebc8262d50fed63 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:33 -0800 Subject: mm: wp lock page before deciding cow An application may rely on get_user_pages() to give it pages writable from userspace and shared with a driver, GUP breaking COW if necessary. It may mprotect() the pages' writability, off and on, from time to time. 
Normally this works fine (so long as the app does not fork); but just occasionally, under memory pressure, a readonly pte in a newly writable area is COWed unnecessarily, breaking the link with the driver: because do_wp_page() does trylock_page, and falls back to COW whenever that fails. For reliable behaviour in the unshared case, when the trylock_page fails, now unlock pagetable, lock page and relock pagetable, before deciding whether Copy-On-Write is really necessary. Reported-by: Zhou Yingchao Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index f594bb65a9f1..3922ffcf3dff 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1848,10 +1848,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * not dirty accountable. */ if (PageAnon(old_page)) { - if (trylock_page(old_page)) { - reuse = can_share_swap_page(old_page); - unlock_page(old_page); + if (!trylock_page(old_page)) { + page_cache_get(old_page); + pte_unmap_unlock(page_table, ptl); + lock_page(old_page); + page_table = pte_offset_map_lock(mm, pmd, address, + &ptl); + if (!pte_same(*page_table, orig_pte)) { + unlock_page(old_page); + page_cache_release(old_page); + goto unlock; + } + page_cache_release(old_page); } + reuse = can_share_swap_page(old_page); + unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { /* -- cgit v1.2.2 From 7b1fe59793e61f826bef053107b57b23954833bb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:34 -0800 Subject: mm: reuse_swap_page replaces can_share_swap_page A good place to free up old swap is where do_wp_page(), or do_swap_page(), is about to redirty the page: the data on disk is then stale and won't be read again; and if we do decide to write the page out later, using the previous swap location makes an unnecessary disk seek very likely. So give can_share_swap_page() the side-effect of delete_from_swap_cache() when it safely can. And can_share_swap_page() was always a misleading name, the more so if it has a side-effect: rename it reuse_swap_page(). Irrelevant cleanup nearby: remove swap_token_default_timeout definition from swap.h: it's used nowhere. 
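Reconstructed from the swapfile.c diff below, the renamed helper with its new side-effect reads roughly:

	int reuse_swap_page(struct page *page)
	{
		int count;

		VM_BUG_ON(!PageLocked(page));
		count = page_mapcount(page);
		if (count <= 1 && PageSwapCache(page)) {
			count += page_swapcount(page);
			if (count == 1 && !PageWriteback(page)) {
				/* sole user: free the stale swap slot now */
				delete_from_swap_cache(page);
				SetPageDirty(page);
			}
		}
		return count == 1;
	}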
Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Acked-by: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 4 ++-- mm/swapfile.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 3922ffcf3dff..8f471edcb985 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1861,7 +1861,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, } page_cache_release(old_page); } - reuse = can_share_swap_page(old_page); + reuse = reuse_swap_page(old_page); unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { @@ -2392,7 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, inc_mm_counter(mm, anon_rss); pte = mk_pte(page, vma->vm_page_prot); - if (write_access && can_share_swap_page(page)) { + if (write_access && reuse_swap_page(page)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); write_access = 0; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 214e90b94946..bfd4ee59cb88 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -326,17 +326,24 @@ static inline int page_swapcount(struct page *page) } /* - * We can use this swap cache entry directly - * if there are no other references to it. + * We can write to an anon page without COW if there are no other references + * to it. And as a side-effect, free up its swap: because the old content + * on disk will never be read, and seeking back there to write new content + * later would only waste time away from clustering. */ -int can_share_swap_page(struct page *page) +int reuse_swap_page(struct page *page) { int count; VM_BUG_ON(!PageLocked(page)); count = page_mapcount(page); - if (count <= 1 && PageSwapCache(page)) + if (count <= 1 && PageSwapCache(page)) { count += page_swapcount(page); + if (count == 1 && !PageWriteback(page)) { + delete_from_swap_cache(page); + SetPageDirty(page); + } + } return count == 1; } -- cgit v1.2.2 From a2c43eed8334e878702fca713b212ae2a11d84b9 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:36 -0800 Subject: mm: try_to_free_swap replaces remove_exclusive_swap_page remove_exclusive_swap_page(): its problem is in living up to its name. It doesn't matter if someone else has a reference to the page (raised page_count); it doesn't matter if the page is mapped into userspace (raised page_mapcount - though that hints it may be worth keeping the swap): all that matters is that there be no more references to the swap (and no writeback in progress). swapoff (try_to_unuse) has been removing pages from swapcache for years, with no concern for page count or page mapcount, and we used to have a comment in lookup_swap_cache() recognizing that: if you go for a page of swapcache, you'll get the right page, but it could have been removed from swapcache by the time you get page lock. So, give up asking for exclusivity: get rid of remove_exclusive_swap_page(), and remove_exclusive_swap_page_ref() and remove_exclusive_swap_page_count() which were spawned for the recent LRU work: replace them by the simpler try_to_free_swap() which just checks page_swapcount(). Similarly, remove the page_count limitation from free_swap_and_cache(), but assume that it's worth holding on to the swap if page is mapped and swap nowhere near full. Add a vm_swap_full() test in free_swap_cache()? It would be consistent, but I think we probably have enough for now.
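The replacement helper, as the swapfile.c hunks further below show, reduces to a few page checks:

	int try_to_free_swap(struct page *page)
	{
		VM_BUG_ON(!PageLocked(page));

		if (!PageSwapCache(page))
			return 0;
		if (PageWriteback(page))
			return 0;
		if (page_swapcount(page))	/* swap still referenced elsewhere */
			return 0;

		delete_from_swap_cache(page);
		SetPageDirty(page);
		return 1;
	}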
Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- mm/page_io.c | 2 +- mm/swap.c | 3 +-- mm/swap_state.c | 8 +++---- mm/swapfile.c | 70 ++++++++++----------------------------------------------- mm/vmscan.c | 2 +- 6 files changed, 20 insertions(+), 67 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 8f471edcb985..1a83fe5339a9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2403,7 +2403,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) - remove_exclusive_swap_page(page); + try_to_free_swap(page); unlock_page(page); if (write_access) { diff --git a/mm/page_io.c b/mm/page_io.c index d277a80efa71..dc6ce0afbded 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -98,7 +98,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) struct bio *bio; int ret = 0, rw = WRITE; - if (remove_exclusive_swap_page(page)) { + if (try_to_free_swap(page)) { unlock_page(page); goto out; } diff --git a/mm/swap.c b/mm/swap.c index ff0b290475fd..ba2c0e8b8b54 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -454,8 +454,7 @@ void pagevec_swap_free(struct pagevec *pvec) struct page *page = pvec->pages[i]; if (PageSwapCache(page) && trylock_page(page)) { - if (PageSwapCache(page)) - remove_exclusive_swap_page_ref(page); + try_to_free_swap(page); unlock_page(page); } } diff --git a/mm/swap_state.c b/mm/swap_state.c index e793fdea275d..bcb472769299 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -195,14 +195,14 @@ void delete_from_swap_cache(struct page *page) * If we are the only user, then try to free up the swap cache. * * Its ok to check for PageSwapCache without the page lock - * here because we are going to recheck again inside - * exclusive_swap_page() _with_ the lock. + * here because we are going to recheck again inside + * try_to_free_swap() _with_ the lock. * - Marcelo */ static inline void free_swap_cache(struct page *page) { - if (PageSwapCache(page) && trylock_page(page)) { - remove_exclusive_swap_page(page); + if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { + try_to_free_swap(page); unlock_page(page); } } diff --git a/mm/swapfile.c b/mm/swapfile.c index bfd4ee59cb88..f43601827607 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -348,68 +348,23 @@ int reuse_swap_page(struct page *page) } /* - * Work out if there are any other processes sharing this - * swap cache page. Free it if you can. Return success. + * If swap is getting full, or if there are no more mappings of this page, + * then try_to_free_swap is called to free its swap space. */ -static int remove_exclusive_swap_page_count(struct page *page, int count) +int try_to_free_swap(struct page *page) { - int retval; - struct swap_info_struct * p; - swp_entry_t entry; - VM_BUG_ON(!PageLocked(page)); if (!PageSwapCache(page)) return 0; if (PageWriteback(page)) return 0; - if (page_count(page) != count) /* us + cache + ptes */ - return 0; - - entry.val = page_private(page); - p = swap_info_get(entry); - if (!p) + if (page_swapcount(page)) return 0; - /* Is the only swap cache user the cache itself? */ - retval = 0; - if (p->swap_map[swp_offset(entry)] == 1) { - /* Recheck the page count with the swapcache lock held.. 
*/ - spin_lock_irq(&swapper_space.tree_lock); - if ((page_count(page) == count) && !PageWriteback(page)) { - __delete_from_swap_cache(page); - SetPageDirty(page); - retval = 1; - } - spin_unlock_irq(&swapper_space.tree_lock); - } - spin_unlock(&swap_lock); - - if (retval) { - swap_free(entry); - page_cache_release(page); - } - - return retval; -} - -/* - * Most of the time the page should have two references: one for the - * process and one for the swap cache. - */ -int remove_exclusive_swap_page(struct page *page) -{ - return remove_exclusive_swap_page_count(page, 2); -} - -/* - * The pageout code holds an extra reference to the page. That raises - * the reference count to test for to 2 for a page that is only in the - * swap cache plus 1 for each process that maps the page. - */ -int remove_exclusive_swap_page_ref(struct page *page) -{ - return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page)); + delete_from_swap_cache(page); + SetPageDirty(page); + return 1; } /* @@ -436,13 +391,12 @@ void free_swap_and_cache(swp_entry_t entry) spin_unlock(&swap_lock); } if (page) { - int one_user; - - one_user = (page_count(page) == 2); - /* Only cache user (+us), or swap space full? Free it! */ - /* Also recheck PageSwapCache after page is locked (above) */ + /* + * Not mapped elsewhere, or swap space full? Free it! + * Also recheck PageSwapCache now page is locked (above). + */ if (PageSwapCache(page) && !PageWriteback(page) && - (one_user || vm_swap_full())) { + (!page_mapped(page) || vm_swap_full())) { delete_from_swap_cache(page); SetPageDirty(page); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d196f46c8808..c8601dd36603 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -759,7 +759,7 @@ cull_mlocked: activate_locked: /* Not a candidate for swapping, so reclaim swap space. */ if (PageSwapCache(page) && vm_swap_full()) - remove_exclusive_swap_page_ref(page); + try_to_free_swap(page); VM_BUG_ON(PageActive(page)); SetPageActive(page); pgactivate++; -- cgit v1.2.2 From 68bdc8d64742ccc5e340c5d122ebbab3f0cf2a74 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:37 -0800 Subject: mm: try_to_unuse check removing right swap There's a possible race in try_to_unuse() which Nick Piggin led me to two years ago. Where it does lock_page() after read_swap_cache_async(), what if another task removed that page from swapcache just before we locked it? It would sail through the (*swap_map > 1) tests doing nothing (because it could not have been removed from swapcache before its swap references were gone), until it reaches the delete_from_swap_cache(page) near the bottom. Now imagine that this page has been allocated to swap on a different swap area while we dropped page lock (perhaps at the top, perhaps in unuse_mm): we could wrongly remove from swap cache before the page has been written to swap, so a subsequent do_swap_page() would read in stale data from swap. I think this case could not happen before: remove_exclusive_swap_page() refused while page count was raised. But now with reuse_swap_page() and try_to_free_swap() removing from swap cache without minding page count, I think it could happen - the previous patch argued that it was safe because try_to_unuse() already ignored page count, but overlooked that it might be breaking the assumptions in try_to_unuse() itself.
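To make the race concrete, one hypothetical interleaving: try_to_unuse() brings the page in from swap area A and drops the page lock; another task locks the page, finds the swap reference gone, and deletes it from swap cache; the page is then re-added to swap cache on swap area B, dirty and not yet written out; try_to_unuse() reacquires the page lock, and without the page_private(page) == entry.val check it would wrongly delete the page from swap cache on B before its contents ever reached disk, so a later do_swap_page() would read stale data.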
Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index f43601827607..9ce7f81c8abc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -889,7 +889,16 @@ static int try_to_unuse(unsigned int type) lock_page(page); wait_on_page_writeback(page); } - if (PageSwapCache(page)) + + /* + * It is conceivable that a racing task removed this page from + * swap cache just before we acquired the page lock at the top, + * or while we dropped it in unuse_mm(). The page might even + * be back in swap cache on another swap area: that we must not + * delete, since it may not have been written out to swap yet. + */ + if (PageSwapCache(page) && + likely(page_private(page) == entry.val)) delete_from_swap_cache(page); /* -- cgit v1.2.2 From 63d6c5ad7fc27455ce5cb4706884671fb7e0df08 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:38 -0800 Subject: mm: remove try_to_munlock from vmscan An unfortunate feature of the Unevictable LRU work was that reclaiming an anonymous page involved an extra scan through the anon_vma: to check that the page is evictable before allocating swap, because the swap could not be freed reliably soon afterwards. Now try_to_free_swap() has replaced remove_exclusive_swap_page(), that's not an issue any more: remove try_to_munlock() call from shrink_page_list(), leaving it to try_to_unmap() to discover if the page is one to be culled to the unevictable list - in which case then try_to_free_swap(). Update unevictable-lru.txt to remove comments on the try_to_munlock() in shrink_page_list(), and shorten some lines over 80 columns. Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Acked-by: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index c8601dd36603..74f875733e2b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -625,15 +625,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, if (PageAnon(page) && !PageSwapCache(page)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; - switch (try_to_munlock(page)) { - case SWAP_FAIL: /* shouldn't happen */ - case SWAP_AGAIN: - goto keep_locked; - case SWAP_MLOCK: - goto cull_mlocked; - case SWAP_SUCCESS: - ; /* fall thru'; add to swap cache */ - } if (!add_to_swap(page, GFP_ATOMIC)) goto activate_locked; may_enter_fs = 1; @@ -752,6 +743,8 @@ free_it: continue; cull_mlocked: + if (PageSwapCache(page)) + try_to_free_swap(page); unlock_page(page); putback_lru_page(page); continue; -- cgit v1.2.2 From ac47b003d03c2a4f28aef1d505b66d24ad191c4f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:39 -0800 Subject: mm: remove gfp_mask from add_to_swap Remove gfp_mask argument from add_to_swap(): it's misleading because its only caller, shrink_page_list(), is not atomic at that point; and in due course (implementing discard) we'll sometimes want to allocate some memory with GFP_NOIO (as is used in swap_writepage) when allocating swap.
No change to the gfp_mask passed down to add_to_swap_cache(): still use __GFP_HIGH without __GFP_WAIT (with nomemalloc and nowarn as before): though it's not obvious if that's the best combination to ask for here. Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap_state.c | 4 ++-- mm/vmscan.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/swap_state.c b/mm/swap_state.c index bcb472769299..81c825f67a7f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -128,7 +128,7 @@ void __delete_from_swap_cache(struct page *page) * Allocate swap space for the page and add the page to the * swap cache. Caller needs to hold the page lock. */ -int add_to_swap(struct page * page, gfp_t gfp_mask) +int add_to_swap(struct page *page) { swp_entry_t entry; int err; @@ -153,7 +153,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask) * Add it to the swap cache and mark it dirty */ err = add_to_swap_cache(page, entry, - gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN); + __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); switch (err) { case 0: /* Success */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 74f875733e2b..cc7401571cba 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -625,7 +625,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, if (PageAnon(page) && !PageSwapCache(page)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; - if (!add_to_swap(page, GFP_ATOMIC)) + if (!add_to_swap(page)) goto activate_locked; may_enter_fs = 1; } -- cgit v1.2.2 From 60371d971a3d01afd102f0bbf2681f32ecc31d78 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:40 -0800 Subject: mm: add add_to_swap stub If we add a failing stub for add_to_swap(), then we can remove the #ifdef CONFIG_SWAP from mm/vmscan.c. This was intended as a source cleanup, but looking more closely, it turns out that the !CONFIG_SWAP case was going to keep_locked for an anonymous page, whereas now it goes to the more suitable activate_locked, like the CONFIG_SWAP nr_swap_pages 0 case. Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Acked-by: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index cc7401571cba..f350523a8eee 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -617,7 +617,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, referenced && page_mapping_inuse(page)) goto activate_locked; -#ifdef CONFIG_SWAP /* * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ @@ -629,7 +628,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, goto activate_locked; may_enter_fs = 1; } -#endif /* CONFIG_SWAP */ mapping = page_mapping(page); -- cgit v1.2.2 From b962716b459505a8d83aea313fea0abe76749f42 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:41 -0800 Subject: mm: optimize get_scan_ratio for no swap Rik suggests a simplified get_scan_ratio() for !CONFIG_SWAP. Yes, the gcc optimizer gives us that, when nr_swap_pages is #defined as 0L. Move the usual declaration to swapfile.c: it never belonged in page_alloc.c.
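As an aside on the mechanism relied on here, a rough sketch (the 0L constant is the one the message above says <linux/swap.h> provides; the rest is illustrative, not from the patch):

	#define nr_swap_pages 0L	/* the CONFIG_SWAP=n definition */

	/* If we have no swap space, do not bother scanning anon pages. */
	if (nr_swap_pages <= 0) {	/* constant-true when !CONFIG_SWAP */
		percent[0] = 0;		/* no scan pressure on anon... */
		percent[1] = 100;	/* ...all of it on file pages */
		return;
	}
	/* With the early return hoisted above them, the zone_page_state()
	 * reads below become provably unreachable, so gcc emits no code
	 * for them in the !CONFIG_SWAP build. */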
Signed-off-by: Hugh Dickins Cc: Lee Schermerhorn Acked-by: Rik van Riel Cc: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Robin Holt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 1 - mm/swapfile.c | 1 + mm/vmscan.c | 12 ++++++------ 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ce2a026219bc..65133436fe3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -69,7 +69,6 @@ EXPORT_SYMBOL(node_states); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; -long nr_swap_pages; int percpu_pagelist_fraction; #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE diff --git a/mm/swapfile.c b/mm/swapfile.c index 9ce7f81c8abc..725e56c362de 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -35,6 +35,7 @@ static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; +long nr_swap_pages; long total_swap_pages; static int swap_overflow; static int least_priority; diff --git a/mm/vmscan.c b/mm/vmscan.c index f350523a8eee..d500b906746c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1327,12 +1327,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, unsigned long anon_prio, file_prio; unsigned long ap, fp; - anon = zone_page_state(zone, NR_ACTIVE_ANON) + - zone_page_state(zone, NR_INACTIVE_ANON); - file = zone_page_state(zone, NR_ACTIVE_FILE) + - zone_page_state(zone, NR_INACTIVE_FILE); - free = zone_page_state(zone, NR_FREE_PAGES); - /* If we have no swap space, do not bother scanning anon pages. */ if (nr_swap_pages <= 0) { percent[0] = 0; @@ -1340,6 +1334,12 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, return; } + anon = zone_page_state(zone, NR_ACTIVE_ANON) + + zone_page_state(zone, NR_INACTIVE_ANON); + file = zone_page_state(zone, NR_ACTIVE_FILE) + + zone_page_state(zone, NR_INACTIVE_FILE); + free = zone_page_state(zone, NR_FREE_PAGES); + /* If we have very few page cache pages, force-scan anon pages. */ if (unlikely(file + free <= zone->pages_high)) { percent[0] = 100; -- cgit v1.2.2 From 077cbc5864cd9188fa4c4e181e48ff58317e6400 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:42 -0800 Subject: memcg: reclaim shouldn't change zone->recent_rotated statistics memcg reclaim shouldn't change zone->recent_rotated statistics. If memcg reclaim changes zone statistics, global reclaim can get a bit confused. Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index d500b906746c..da7c3a2304a7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1246,7 +1246,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * This helps balance scan pressure between file and anonymous * pages in get_scan_ratio. */ - zone->recent_rotated[!!file] += pgmoved; + if (scan_global_lru(sc)) + zone->recent_rotated[!!file] += pgmoved; /* * Move the pages to the [file or anon] inactive list. -- cgit v1.2.2 From feb166948876e2ff8f70b2da273b2a8e86957578 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:43 -0800 Subject: mm: make init_section_page_cgroup() static Sparse outputs the following warning: mm/page_cgroup.c:100:15: warning: symbol 'init_section_page_cgroup' was not declared. Should it be static? Clean it up here.
Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_cgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index ab27ff750519..d6507a660ed6 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -101,7 +101,7 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) } /* __alloc_bootmem...() is protected by !slab_available() */ -int __init_refok init_section_page_cgroup(unsigned long pfn) +static int __init_refok init_section_page_cgroup(unsigned long pfn) { struct mem_section *section; struct page_cgroup *base, *pc; -- cgit v1.2.2 From 2bc7273b0e3a509fb598abfc5b9fe50158b830d2 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:43 -0800 Subject: mm: make maddr __iomem Sparse outputs the following warnings: mm/memory.c:2936:8: warning: incorrect type in assignment (different address spaces) mm/memory.c:2936:8: expected void *maddr mm/memory.c:2936:8: got void [noderef] Clean it up here. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 1a83fe5339a9..89339c61f8e5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2966,7 +2966,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, { resource_size_t phys_addr; unsigned long prot = 0; - void *maddr; + void __iomem *maddr; int offset = addr & (PAGE_SIZE-1); if (follow_phys(vma, addr, write, &prot, &phys_addr)) -- cgit v1.2.2 From d38d2a7582012ecf53aac33683ca5c689093cf65 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:44 -0800 Subject: mm: make mem_cgroup_resize_limit() static Sparse outputs the following warning: mm/memcontrol.c:782:5: warning: symbol 'mem_cgroup_resize_limit' was not declared. Should it be static? Clean it up here. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 866dcc7eeb0c..51ee96545579 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -779,7 +779,8 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } -int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val) +static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, + unsigned long long val) { int retry_count = MEM_CGROUP_RECLAIM_RETRIES; -- cgit v1.2.2 From ff30153bf9647c8646538810d4c01015a5e44787 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:44 -0800 Subject: mm: make scan_all_zones_unevictable_pages() static Sparse outputs the following warning: mm/vmscan.c:2549:6: warning: symbol 'scan_all_zones_unevictable_pages' was not declared. Should it be static? Clean it up here. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index da7c3a2304a7..062767d159da 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2506,7 +2506,7 @@ void scan_zone_unevictable_pages(struct zone *zone) * that has possibly/probably made some previously unevictable pages * evictable.
*/ -void scan_all_zones_unevictable_pages(void) +static void scan_all_zones_unevictable_pages(void) { struct zone *zone; -- cgit v1.2.2 From 14b90b22ec0f359ef4791033ab386b2b627bae07 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:45 -0800 Subject: mm: make scan_zone_unevictable_pages() static Sparse outputs the following warning: mm/vmscan.c:2507:6: warning: symbol 'scan_zone_unevictable_pages' was not declared. Should it be static? Clean it up here. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 062767d159da..1ef5a2eeb298 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2464,7 +2464,7 @@ void scan_mapping_unevictable_pages(struct address_space *mapping) * back onto @zone's unevictable list. */ #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */ -void scan_zone_unevictable_pages(struct zone *zone) +static void scan_zone_unevictable_pages(struct zone *zone) { struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list; unsigned long scan; -- cgit v1.2.2 From efab81864161f8c546d4403873e7ae7831ed5b26 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:39:46 -0800 Subject: mm: make setup_per_zone_inactive_ratio() static Sparse outputs the following warning: mm/page_alloc.c:4301:6: warning: symbol 'setup_per_zone_inactive_ratio' was not declared. Should it be static? Clean it up here. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 65133436fe3d..31c512410a99 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4314,7 +4314,7 @@ void setup_per_zone_pages_min(void) * 1TB 101 10GB * 10TB 320 32GB */ -void setup_per_zone_inactive_ratio(void) +static void setup_per_zone_inactive_ratio(void) { struct zone *zone; -- cgit v1.2.2 From 73fd8748ab0b9b3ddd178bea1d7ae03372033d96 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:47 -0800 Subject: swapfile: swapon needs larger size type sys_swapon()'s swapfilesize (better renamed swapfilepages) is declared as an int, but should be an unsigned long like the maxpages it's compared against: on 64-bit (with 4kB pages) a swapfile of 2^44 bytes was rejected with "Swap area shorter than signature indicates". Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 725e56c362de..e2adc8eb9317 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1461,7 +1461,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) int nr_extents = 0; sector_t span; unsigned long maxpages = 1; - int swapfilesize; + unsigned long swapfilepages; unsigned short *swap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; @@ -1539,7 +1539,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) goto bad_swap; } - swapfilesize = i_size_read(inode) >> PAGE_SHIFT; + swapfilepages = i_size_read(inode) >> PAGE_SHIFT; /* * Read the swap header.
@@ -1616,7 +1616,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = -EINVAL; if (!maxpages) goto bad_swap; - if (swapfilesize && maxpages > swapfilesize) { + if (swapfilepages && maxpages > swapfilepages) { printk(KERN_WARNING "Swap area shorter than signature indicates\n"); goto bad_swap; -- cgit v1.2.2 From 22c6f8fdb31993cf49bdd4a47b64a7002391e1c7 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:48 -0800 Subject: swapfile: remove SWP_ACTIVE mask Remove the SWP_ACTIVE mask: it just obscures the SWP_WRITEOK flag. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index e2adc8eb9317..915cb3fc43d7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1222,7 +1222,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile) spin_lock(&swap_lock); for (type = swap_list.head; type >= 0; type = swap_info[type].next) { p = swap_info + type; - if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) { + if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) break; } @@ -1674,7 +1674,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) else p->prio = --least_priority; p->swap_map = swap_map; - p->flags = SWP_ACTIVE; + p->flags |= SWP_WRITEOK; nr_swap_pages += nr_good_pages; total_swap_pages += nr_good_pages; -- cgit v1.2.2 From 886bb7e9c3ed0bb3e4a2b1f336d8c6a6e5a4b782 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:48 -0800 Subject: swapfile: remove surplus whitespace Remove trailing whitespace from swapfile.c, and odd swap_show() alignment. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 915cb3fc43d7..c46c83d6aab0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -92,7 +92,7 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) unsigned long offset, last_in_cluster; int latency_ration = LATENCY_LIMIT; - /* + /* * We try to cluster swap pages by allocating them sequentially * in swap. Once we've allocated SWAPFILE_CLUSTER pages this * way, however, we resort to first-free allocation, starting @@ -269,7 +269,7 @@ bad_nofile: printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); out: return NULL; -} +} static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) { @@ -736,10 +736,10 @@ static int try_to_unuse(unsigned int type) break; } - /* + /* * Get a page for the entry, using the existing swap * cache page if there is one. Otherwise, get a clean - * page and read the swap into it. + * page and read the swap into it. */ swap_map = &si->swap_map[i]; entry = swp_entry(type, i); @@ -1202,7 +1202,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile) char * pathname; int i, type, prev; int err; - + if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1395,12 +1395,12 @@ static int swap_show(struct seq_file *swap, void *v) file = ptr->swap_file; len = seq_path(swap, &file->f_path, " \t\n\\"); seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", - len < 40 ? 40 - len : 1, " ", - S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? + len < 40 ? 40 - len : 1, " ", + S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? 
"partition" : "file\t", - ptr->pages << (PAGE_SHIFT - 10), - ptr->inuse_pages << (PAGE_SHIFT - 10), - ptr->prio); + ptr->pages << (PAGE_SHIFT - 10), + ptr->inuse_pages << (PAGE_SHIFT - 10), + ptr->prio); return 0; } @@ -1565,7 +1565,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = -EINVAL; goto bad_swap; } - + switch (swap_header_version) { case 1: printk(KERN_ERR "version 0 swap is no longer supported. " -- cgit v1.2.2 From 81e33971271ec8603fe696731ff9967afb99e729 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:49 -0800 Subject: swapfile: remove v0 SWAP-SPACE message The kernel has not supported v0 SWAP-SPACE since 2.5.22: I think we can now safely drop its "version 0 swap is no longer supported" message - just say "Unable to find swap-space signature" as usual. This removes one level of indentation from a stretch of sys_swapon(). I'd have liked to be specific, saying "Unable to find SWAPSPACE2 signature", but it's just too confusing that the version 1 signature shows the number 2. Irrelevant nearby cleanup: kmap(page) already gives page_address(page). Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 146 ++++++++++++++++++++++++++-------------------------------- 1 file changed, 65 insertions(+), 81 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index c46c83d6aab0..85ff603385c3 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1456,7 +1456,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) int i, prev; int error; union swap_header *swap_header = NULL; - int swap_header_version; unsigned int nr_good_pages = 0; int nr_extents = 0; sector_t span; @@ -1553,101 +1552,86 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = PTR_ERR(page); goto bad_swap; } - kmap(page); - swap_header = page_address(page); + swap_header = kmap(page); - if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10)) - swap_header_version = 1; - else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10)) - swap_header_version = 2; - else { + if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { printk(KERN_ERR "Unable to find swap-space signature\n"); error = -EINVAL; goto bad_swap; } - switch (swap_header_version) { - case 1: - printk(KERN_ERR "version 0 swap is no longer supported. " - "Use mkswap -v1 %s\n", name); + /* swap partition endianess hack... */ + if (swab32(swap_header->info.version) == 1) { + swab32s(&swap_header->info.version); + swab32s(&swap_header->info.last_page); + swab32s(&swap_header->info.nr_badpages); + for (i = 0; i < swap_header->info.nr_badpages; i++) + swab32s(&swap_header->info.badpages[i]); + } + /* Check the swap header's sub-version */ + if (swap_header->info.version != 1) { + printk(KERN_WARNING + "Unable to handle swap header version %d\n", + swap_header->info.version); error = -EINVAL; goto bad_swap; - case 2: - /* swap partition endianess hack... 
*/ - if (swab32(swap_header->info.version) == 1) { - swab32s(&swap_header->info.version); - swab32s(&swap_header->info.last_page); - swab32s(&swap_header->info.nr_badpages); - for (i = 0; i < swap_header->info.nr_badpages; i++) - swab32s(&swap_header->info.badpages[i]); - } - /* Check the swap header's sub-version and the size of - the swap file and bad block lists */ - if (swap_header->info.version != 1) { - printk(KERN_WARNING - "Unable to handle swap header version %d\n", - swap_header->info.version); - error = -EINVAL; - goto bad_swap; - } + } - p->lowest_bit = 1; - p->cluster_next = 1; + p->lowest_bit = 1; + p->cluster_next = 1; - /* - * Find out how many pages are allowed for a single swap - * device. There are two limiting factors: 1) the number of - * bits for the swap offset in the swp_entry_t type and - * 2) the number of bits in the a swap pte as defined by - * the different architectures. In order to find the - * largest possible bit mask a swap entry with swap type 0 - * and swap offset ~0UL is created, encoded to a swap pte, - * decoded to a swp_entry_t again and finally the swap - * offset is extracted. This will mask all the bits from - * the initial ~0UL mask that can't be encoded in either - * the swp_entry_t or the architecture definition of a - * swap pte. - */ - maxpages = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0,~0UL)))) - 1; - if (maxpages > swap_header->info.last_page) - maxpages = swap_header->info.last_page; - p->highest_bit = maxpages - 1; + /* + * Find out how many pages are allowed for a single swap + * device. There are two limiting factors: 1) the number of + * bits for the swap offset in the swp_entry_t type and + * 2) the number of bits in the a swap pte as defined by + * the different architectures. In order to find the + * largest possible bit mask a swap entry with swap type 0 + * and swap offset ~0UL is created, encoded to a swap pte, + * decoded to a swp_entry_t again and finally the swap + * offset is extracted. This will mask all the bits from + * the initial ~0UL mask that can't be encoded in either + * the swp_entry_t or the architecture definition of a + * swap pte. 
+ */ + maxpages = swp_offset(pte_to_swp_entry( + swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1; + if (maxpages > swap_header->info.last_page) + maxpages = swap_header->info.last_page; + p->highest_bit = maxpages - 1; - error = -EINVAL; - if (!maxpages) - goto bad_swap; - if (swapfilepages && maxpages > swapfilepages) { - printk(KERN_WARNING - "Swap area shorter than signature indicates\n"); - goto bad_swap; - } - if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) - goto bad_swap; - if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) - goto bad_swap; + error = -EINVAL; + if (!maxpages) + goto bad_swap; + if (swapfilepages && maxpages > swapfilepages) { + printk(KERN_WARNING + "Swap area shorter than signature indicates\n"); + goto bad_swap; + } + if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) + goto bad_swap; + if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) + goto bad_swap; - /* OK, set up the swap map and apply the bad block list */ - swap_map = vmalloc(maxpages * sizeof(short)); - if (!swap_map) { - error = -ENOMEM; - goto bad_swap; - } + /* OK, set up the swap map and apply the bad block list */ + swap_map = vmalloc(maxpages * sizeof(short)); + if (!swap_map) { + error = -ENOMEM; + goto bad_swap; + } - error = 0; - memset(swap_map, 0, maxpages * sizeof(short)); - for (i = 0; i < swap_header->info.nr_badpages; i++) { - int page_nr = swap_header->info.badpages[i]; - if (page_nr <= 0 || page_nr >= swap_header->info.last_page) - error = -EINVAL; - else - swap_map[page_nr] = SWAP_MAP_BAD; - } - nr_good_pages = swap_header->info.last_page - - swap_header->info.nr_badpages - - 1 /* header page */; - if (error) + memset(swap_map, 0, maxpages * sizeof(short)); + for (i = 0; i < swap_header->info.nr_badpages; i++) { + int page_nr = swap_header->info.badpages[i]; + if (page_nr <= 0 || page_nr >= swap_header->info.last_page) { + error = -EINVAL; goto bad_swap; + } + swap_map[page_nr] = SWAP_MAP_BAD; } + nr_good_pages = swap_header->info.last_page - + swap_header->info.nr_badpages - + 1 /* header page */; if (nr_good_pages) { swap_map[0] = SWAP_MAP_BAD; -- cgit v1.2.2 From ebebbbe904634b0ca1c674457b399f68db5e05b1 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:50 -0800 Subject: swapfile: rearrange scan and swap_info Before making functional changes, rearrange scan_swap_map() to simplify subsequent diffs. Actually, there is one functional change in there: leave cluster_nr negative while scanning for a new cluster - resetting it early increased the likelihood that when we have difficulty finding a free cluster, another task may come in and try doing exactly the same - just a waste of cpu. Before making functional changes, rearrange struct swap_info_struct slightly: flags will be needed as an unsigned long (for wait_on_bit), next is a good int to pair with prio, old_block_size is uninteresting so shift it to the end. 
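A note on the cluster_nr subtlety called out above, as I read the rearranged code (sketch; the comments are mine, not from the patch):

	if (unlikely(!si->cluster_nr--)) {
		/* Only the task that saw cluster_nr == 0 enters here and
		 * starts the cluster search; cluster_nr is now -1, and it
		 * stays negative (-2, -3, ...) as other allocators sample
		 * it while swap_lock is dropped, so they skip the search
		 * and fall through to ordinary single-page allocation.
		 * cluster_nr is reset to SWAPFILE_CLUSTER - 1 only once
		 * the search has finished. */
	}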
Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 66 +++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 85ff603385c3..4d9855f86e7d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -89,7 +89,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) static inline unsigned long scan_swap_map(struct swap_info_struct *si) { - unsigned long offset, last_in_cluster; + unsigned long offset; + unsigned long last_in_cluster; int latency_ration = LATENCY_LIMIT; /* @@ -103,10 +104,13 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) */ si->flags += SWP_SCANNING; - if (unlikely(!si->cluster_nr)) { - si->cluster_nr = SWAPFILE_CLUSTER - 1; - if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) - goto lowest; + offset = si->cluster_next; + + if (unlikely(!si->cluster_nr--)) { + if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { + si->cluster_nr = SWAPFILE_CLUSTER - 1; + goto checks; + } spin_unlock(&swap_lock); offset = si->lowest_bit; @@ -118,43 +122,47 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) last_in_cluster = offset + SWAPFILE_CLUSTER; else if (offset == last_in_cluster) { spin_lock(&swap_lock); - si->cluster_next = offset-SWAPFILE_CLUSTER+1; - goto cluster; + offset -= SWAPFILE_CLUSTER - 1; + si->cluster_next = offset; + si->cluster_nr = SWAPFILE_CLUSTER - 1; + goto checks; } if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; } } + + offset = si->lowest_bit; spin_lock(&swap_lock); - goto lowest; + si->cluster_nr = SWAPFILE_CLUSTER - 1; } - si->cluster_nr--; -cluster: - offset = si->cluster_next; - if (offset > si->highest_bit) -lowest: offset = si->lowest_bit; -checks: if (!(si->flags & SWP_WRITEOK)) +checks: + if (!(si->flags & SWP_WRITEOK)) goto no_page; if (!si->highest_bit) goto no_page; - if (!si->swap_map[offset]) { - if (offset == si->lowest_bit) - si->lowest_bit++; - if (offset == si->highest_bit) - si->highest_bit--; - si->inuse_pages++; - if (si->inuse_pages == si->pages) { - si->lowest_bit = si->max; - si->highest_bit = 0; - } - si->swap_map[offset] = 1; - si->cluster_next = offset + 1; - si->flags -= SWP_SCANNING; - return offset; + if (offset > si->highest_bit) + offset = si->lowest_bit; + if (si->swap_map[offset]) + goto scan; + + if (offset == si->lowest_bit) + si->lowest_bit++; + if (offset == si->highest_bit) + si->highest_bit--; + si->inuse_pages++; + if (si->inuse_pages == si->pages) { + si->lowest_bit = si->max; + si->highest_bit = 0; } + si->swap_map[offset] = 1; + si->cluster_next = offset + 1; + si->flags -= SWP_SCANNING; + return offset; +scan: spin_unlock(&swap_lock); while (++offset <= si->highest_bit) { if (!si->swap_map[offset]) { @@ -167,7 +175,7 @@ checks: if (!(si->flags & SWP_WRITEOK)) } } spin_lock(&swap_lock); - goto lowest; + goto checks; no_page: si->flags -= SWP_SCANNING; -- cgit v1.2.2 From 6a6ba83175c029c7820765bae44692266b29e67a Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:51 -0800 Subject: swapfile: swapon use discard (trim) When adding swap, all the old data on swap can be forgotten: sys_swapon() discard all but the header page of the swap partition (or every extent but the header of the swap file), to give a solidstate swap device the opportunity to optimize its wear-levelling. 
If that succeeds, note SWP_DISCARDABLE for later use, and report it with a "D" at the right end of the kernel's "Adding ... swap" message. Perhaps something should be shown in /proc/swaps (swapon -s), but we have to be more cautious before making any addition to that format. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Cc: David Woodhouse Cc: Jens Axboe Cc: Matthew Wilcox Cc: Joern Engel Cc: James Bottomley Cc: Donjun Shin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 4d9855f86e7d..fbeb4bb8eb50 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -84,6 +84,37 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) up_read(&swap_unplug_sem); } +/* + * swapon tell device that all the old swap contents can be discarded, + * to allow the swap device to optimize its wear-levelling. + */ +static int discard_swap(struct swap_info_struct *si) +{ + struct swap_extent *se; + int err = 0; + + list_for_each_entry(se, &si->extent_list, list) { + sector_t start_block = se->start_block << (PAGE_SHIFT - 9); + pgoff_t nr_blocks = se->nr_pages << (PAGE_SHIFT - 9); + + if (se->start_page == 0) { + /* Do not discard the swap header page! */ + start_block += 1 << (PAGE_SHIFT - 9); + nr_blocks -= 1 << (PAGE_SHIFT - 9); + if (!nr_blocks) + continue; + } + + err = blkdev_issue_discard(si->bdev, start_block, + nr_blocks, GFP_KERNEL); + if (err) + break; + + cond_resched(); + } + return err; /* That will often be -EOPNOTSUPP */ +} + #define SWAPFILE_CLUSTER 256 #define LATENCY_LIMIT 256 @@ -1658,6 +1689,9 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) goto bad_swap; } + if (discard_swap(p) == 0) + p->flags |= SWP_DISCARDABLE; + mutex_lock(&swapon_mutex); spin_lock(&swap_lock); if (swap_flags & SWAP_FLAG_PREFER) @@ -1671,9 +1705,10 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) total_swap_pages += nr_good_pages; printk(KERN_INFO "Adding %uk swap on %s. " - "Priority:%d extents:%d across:%lluk\n", + "Priority:%d extents:%d across:%lluk%s\n", nr_good_pages<<(PAGE_SHIFT-10), name, p->prio, - nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10)); + nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), + (p->flags & SWP_DISCARDABLE) ? " D" : ""); /* insert swap space into swap_list: */ prev = -1; -- cgit v1.2.2 From 7992fde72ce06c73280a1939b7a1e903bc95ef85 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:53 -0800 Subject: swapfile: swap allocation use discard When scan_swap_map() finds a free cluster of swap pages to allocate, discard the old contents of the cluster if the device supports discard. But don't bother when swap is so fragmented that we allocate single pages. Be careful about racing allocations made while we're scanning for a cluster; and hold up allocations made while we're discarding. 
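One piece of arithmetic both discard helpers share, sketched under the assumption of 4kB pages: blkdev_issue_discard() works in 512-byte sectors, hence the shifts by PAGE_SHIFT - 9 when converting a swap extent's page numbers (a later patch in this series widens nr_blocks to sector_t so the shift cannot overflow):

	/* PAGE_SHIFT == 12, so PAGE_SHIFT - 9 == 3: 8 sectors per page */
	sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
	sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

	/* e.g. a 256-page (1MB) cluster starting at page 4096 maps to
	 * sector 32768, length 2048 sectors */
	err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL);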
Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Cc: David Woodhouse Cc: Jens Axboe Cc: Matthew Wilcox Cc: Joern Engel Cc: James Bottomley Cc: Donjun Shin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 118 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index fbeb4bb8eb50..ca75b9e7c09f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -115,14 +115,62 @@ static int discard_swap(struct swap_info_struct *si) return err; /* That will often be -EOPNOTSUPP */ } +/* + * swap allocation tell device that a cluster of swap can now be discarded, + * to allow the swap device to optimize its wear-levelling. + */ +static void discard_swap_cluster(struct swap_info_struct *si, + pgoff_t start_page, pgoff_t nr_pages) +{ + struct swap_extent *se = si->curr_swap_extent; + int found_extent = 0; + + while (nr_pages) { + struct list_head *lh; + + if (se->start_page <= start_page && + start_page < se->start_page + se->nr_pages) { + pgoff_t offset = start_page - se->start_page; + sector_t start_block = se->start_block + offset; + pgoff_t nr_blocks = se->nr_pages - offset; + + if (nr_blocks > nr_pages) + nr_blocks = nr_pages; + start_page += nr_blocks; + nr_pages -= nr_blocks; + + if (!found_extent++) + si->curr_swap_extent = se; + + start_block <<= PAGE_SHIFT - 9; + nr_blocks <<= PAGE_SHIFT - 9; + if (blkdev_issue_discard(si->bdev, start_block, + nr_blocks, GFP_NOIO)) + break; + } + + lh = se->list.next; + if (lh == &si->extent_list) + lh = lh->next; + se = list_entry(lh, struct swap_extent, list); + } +} + +static int wait_for_discard(void *word) +{ + schedule(); + return 0; +} + #define SWAPFILE_CLUSTER 256 #define LATENCY_LIMIT 256 static inline unsigned long scan_swap_map(struct swap_info_struct *si) { unsigned long offset; - unsigned long last_in_cluster; + unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; + int found_free_cluster = 0; /* * We try to cluster swap pages by allocating them sequentially @@ -142,6 +190,19 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; } + if (si->flags & SWP_DISCARDABLE) { + /* + * Start range check on racing allocations, in case + * they overlap the cluster we eventually decide on + * (we scan without swap_lock to allow preemption). + * It's hardly conceivable that cluster_nr could be + * wrapped during our scan, but don't depend on it. + */ + if (si->lowest_alloc) + goto checks; + si->lowest_alloc = si->max; + si->highest_alloc = 0; + } spin_unlock(&swap_lock); offset = si->lowest_bit; @@ -156,6 +217,7 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) offset -= SWAPFILE_CLUSTER - 1; si->cluster_next = offset; si->cluster_nr = SWAPFILE_CLUSTER - 1; + found_free_cluster = 1; goto checks; } if (unlikely(--latency_ration < 0)) { @@ -167,6 +229,7 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) offset = si->lowest_bit; spin_lock(&swap_lock); si->cluster_nr = SWAPFILE_CLUSTER - 1; + si->lowest_alloc = 0; } checks: @@ -191,6 +254,60 @@ checks: si->swap_map[offset] = 1; si->cluster_next = offset + 1; si->flags -= SWP_SCANNING; + + if (si->lowest_alloc) { + /* + * Only set when SWP_DISCARDABLE, and there's a scan + * for a free cluster in progress or just completed. 
+ */ + if (found_free_cluster) { + /* + * To optimize wear-levelling, discard the + * old data of the cluster, taking care not to + * discard any of its pages that have already + * been allocated by racing tasks (offset has + * already stepped over any at the beginning). + */ + if (offset < si->highest_alloc && + si->lowest_alloc <= last_in_cluster) + last_in_cluster = si->lowest_alloc - 1; + si->flags |= SWP_DISCARDING; + spin_unlock(&swap_lock); + + if (offset < last_in_cluster) + discard_swap_cluster(si, offset, + last_in_cluster - offset + 1); + + spin_lock(&swap_lock); + si->lowest_alloc = 0; + si->flags &= ~SWP_DISCARDING; + + smp_mb(); /* wake_up_bit advises this */ + wake_up_bit(&si->flags, ilog2(SWP_DISCARDING)); + + } else if (si->flags & SWP_DISCARDING) { + /* + * Delay using pages allocated by racing tasks + * until the whole discard has been issued. We + * could defer that delay until swap_writepage, + * but it's easier to keep this self-contained. + */ + spin_unlock(&swap_lock); + wait_on_bit(&si->flags, ilog2(SWP_DISCARDING), + wait_for_discard, TASK_UNINTERRUPTIBLE); + spin_lock(&swap_lock); + } else { + /* + * Note pages allocated by racing tasks while + * scan for a free cluster is in progress, so + * that its final discard can exclude them. + */ + if (offset < si->lowest_alloc) + si->lowest_alloc = offset; + if (offset > si->highest_alloc) + si->highest_alloc = offset; + } + } return offset; scan: -- cgit v1.2.2 From 20137a490f397d9c01fc9fadd83a8d198bda4477 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:54 -0800 Subject: swapfile: swapon randomize if nonrot Swap allocation has always started from the beginning of the swap area; but if we're dealing with a solidstate swap device which can only remap blocks within limited zones, that would sooner wear out the first zone. Therefore sys_swapon() test whether blk_queue is non-rotational, and if so randomize the cluster_next starting position for allocation. If blk_queue is nonrot, note SWP_SOLIDSTATE for later use, and report it with an "SS" at the right end of the kernel's "Adding ... swap" message (so that if it's both nonrot and discardable, "SSD" will be shown there). Perhaps something should be shown in /proc/swaps (swapon -s), but we have to be more cautious before making any addition to that format. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Cc: David Woodhouse Cc: Jens Axboe Cc: Matthew Wilcox Cc: Joern Engel Cc: James Bottomley Cc: Donjun Shin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index ca75b9e7c09f..b0f56603b9be 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -1806,6 +1807,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) goto bad_swap; } + if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { + p->flags |= SWP_SOLIDSTATE; + srandom32((u32)get_seconds()); + p->cluster_next = 1 + (random32() % p->highest_bit); + } if (discard_swap(p) == 0) p->flags |= SWP_DISCARDABLE; @@ -1822,10 +1828,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) total_swap_pages += nr_good_pages; printk(KERN_INFO "Adding %uk swap on %s. 
" - "Priority:%d extents:%d across:%lluk%s\n", + "Priority:%d extents:%d across:%lluk %s%s\n", nr_good_pages<<(PAGE_SHIFT-10), name, p->prio, nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), - (p->flags & SWP_DISCARDABLE) ? " D" : ""); + (p->flags & SWP_SOLIDSTATE) ? "SS" : "", + (p->flags & SWP_DISCARDABLE) ? "D" : ""); /* insert swap space into swap_list: */ prev = -1; -- cgit v1.2.2 From c60aa176c6de82703f064082b909496fc4fee956 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:55 -0800 Subject: swapfile: swap allocation cycle if nonrot Though attempting to find free clusters (Andrea), swap allocation has always restarted its searches from the beginning of the swap area (sct), to reduce seek times between swap pages, by not scattering them all over the partition. But on a solidstate swap device, seeks are cheap, and block remapping to level the wear may be limited by zones: in that case it's better to cycle around the whole partition. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Cc: David Woodhouse Cc: Jens Axboe Cc: Matthew Wilcox Cc: Joern Engel Cc: James Bottomley Cc: Donjun Shin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index b0f56603b9be..763210732b5f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -169,6 +169,7 @@ static int wait_for_discard(void *word) static inline unsigned long scan_swap_map(struct swap_info_struct *si) { unsigned long offset; + unsigned long scan_base; unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; int found_free_cluster = 0; @@ -181,10 +182,11 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) * all over the entire swap partition, so that we reduce * overall disk seek times between swap pages. -- sct * But we do now try to find an empty cluster. -Andrea + * And we let swap pages go all over an SSD partition. Hugh */ si->flags += SWP_SCANNING; - offset = si->cluster_next; + scan_base = offset = si->cluster_next; if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { @@ -206,7 +208,16 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) } spin_unlock(&swap_lock); - offset = si->lowest_bit; + /* + * If seek is expensive, start searching for new cluster from + * start of partition, to minimize the span of allocated swap. + * But if seek is cheap, search from our current position, so + * that swap is allocated from all over the partition: if the + * Flash Translation Layer only remaps within limited zones, + * we don't want to wear out the first zone too quickly. 
+ */ + if (!(si->flags & SWP_SOLIDSTATE)) + scan_base = offset = si->lowest_bit; last_in_cluster = offset + SWAPFILE_CLUSTER - 1; /* Locate the first empty (unaligned) cluster */ @@ -228,6 +239,27 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si) } offset = si->lowest_bit; + last_in_cluster = offset + SWAPFILE_CLUSTER - 1; + + /* Locate the first empty (unaligned) cluster */ + for (; last_in_cluster < scan_base; offset++) { + if (si->swap_map[offset]) + last_in_cluster = offset + SWAPFILE_CLUSTER; + else if (offset == last_in_cluster) { + spin_lock(&swap_lock); + offset -= SWAPFILE_CLUSTER - 1; + si->cluster_next = offset; + si->cluster_nr = SWAPFILE_CLUSTER - 1; + found_free_cluster = 1; + goto checks; + } + if (unlikely(--latency_ration < 0)) { + cond_resched(); + latency_ration = LATENCY_LIMIT; + } + } + + offset = scan_base; spin_lock(&swap_lock); si->cluster_nr = SWAPFILE_CLUSTER - 1; si->lowest_alloc = 0; @@ -239,7 +271,7 @@ checks: if (!si->highest_bit) goto no_page; if (offset > si->highest_bit) - offset = si->lowest_bit; + scan_base = offset = si->lowest_bit; if (si->swap_map[offset]) goto scan; @@ -323,8 +355,18 @@ scan: latency_ration = LATENCY_LIMIT; } } + offset = si->lowest_bit; + while (++offset < scan_base) { + if (!si->swap_map[offset]) { + spin_lock(&swap_lock); + goto checks; + } + if (unlikely(--latency_ration < 0)) { + cond_resched(); + latency_ration = LATENCY_LIMIT; + } + } spin_lock(&swap_lock); - goto checks; no_page: si->flags -= SWP_SCANNING; -- cgit v1.2.2 From 858a29900ea2d639759e697be901a60b759cdcfb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:56 -0800 Subject: swapfile: change discard pgoff_t to sector_t Change pgoff_t nr_blocks in discard_swap() and discard_swap_cluster() to sector_t: given the constraints on swap offsets (in particular, the 5 bits of swap type accommodated in the same unsigned long), pgoff_t was actually safe as is, but it certainly looked worrying when shifted left. [akpm@linux-foundation.org: fix shift overflow] Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Nick Piggin Cc: David Woodhouse Cc: Jens Axboe Cc: Matthew Wilcox Cc: Joern Engel Cc: James Bottomley Cc: Donjun Shin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 763210732b5f..6a078557306a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -96,7 +96,7 @@ static int discard_swap(struct swap_info_struct *si) list_for_each_entry(se, &si->extent_list, list) { sector_t start_block = se->start_block << (PAGE_SHIFT - 9); - pgoff_t nr_blocks = se->nr_pages << (PAGE_SHIFT - 9); + sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); if (se->start_page == 0) { /* Do not discard the swap header page! 
*/ @@ -133,7 +133,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, start_page < se->start_page + se->nr_pages) { pgoff_t offset = start_page - se->start_page; sector_t start_block = se->start_block + offset; - pgoff_t nr_blocks = se->nr_pages - offset; + sector_t nr_blocks = se->nr_pages - offset; if (nr_blocks > nr_pages) nr_blocks = nr_pages; -- cgit v1.2.2 From f0d7a4b3ed46816f5097d521850a8ab7a0d40f3c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:39:57 -0800 Subject: swapfile: let others seed random Remove the srandom32((u32)get_seconds()) from non-rotational swapon: there's been a coincidental discussion of earlier randomization, assume that goes ahead, let swapon be a client rather than stirring for itself. Signed-off-by: Hugh Dickins Cc: David Woodhouse Cc: Donjun Shin Cc: James Bottomley Cc: Jens Axboe Cc: Joern Engel Cc: KAMEZAWA Hiroyuki Cc: Matthew Wilcox Cc: Nick Piggin Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 6a078557306a..d00523601913 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1851,7 +1851,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { p->flags |= SWP_SOLIDSTATE; - srandom32((u32)get_seconds()); p->cluster_next = 1 + (random32() % p->highest_bit); } if (discard_swap(p) == 0) -- cgit v1.2.2 From ebdd4aea8d736e3b5ce27ab0a26860c9fded341b Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Tue, 6 Jan 2009 14:39:58 -0800 Subject: hugetlb: fix sparse warnings Fix the following sparse warnings: mm/hugetlb.c:375:3: warning: returning void-valued expression mm/hugetlb.c:408:3: warning: returning void-valued expression Signed-off-by: Hannes Eder Acked-by: Nishanth Aravamudan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 9595278b5ab4..82321da23cc3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -400,8 +400,10 @@ static void clear_huge_page(struct page *page, { int i; - if (unlikely(sz > MAX_ORDER_NR_PAGES)) - return clear_gigantic_page(page, addr, sz); + if (unlikely(sz > MAX_ORDER_NR_PAGES)) { + clear_gigantic_page(page, addr, sz); + return; + } might_sleep(); for (i = 0; i < sz/PAGE_SIZE; i++) { @@ -433,8 +435,10 @@ static void copy_huge_page(struct page *dst, struct page *src, int i; struct hstate *h = hstate_vma(vma); - if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) - return copy_gigantic_page(dst, src, addr, vma); + if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { + copy_gigantic_page(dst, src, addr, vma); + return; + } might_sleep(); for (i = 0; i < pages_per_huge_page(h); i++) { -- cgit v1.2.2 From a79311c14eae4bb946a97af25f3e1b17d625985d Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Tue, 6 Jan 2009 14:40:01 -0800 Subject: vmscan: bail out of direct reclaim after swap_cluster_max pages When the VM is under pressure, it can happen that several direct reclaim processes are in the pageout code simultaneously. It also happens that the reclaiming processes run into mostly referenced, mapped and dirty pages in the first round. This results in multiple direct reclaim processes having a lower pageout priority, which corresponds to a higher target of pages to scan. 
This in turn can result in each direct reclaim process freeing many pages. Together, they can end up freeing way too many pages. This kicks useful data out of memory (in some cases more than half of all memory is swapped out). It also impacts performance by keeping tasks stuck in the pageout code for too long. A 30% improvement in hackbench has been observed with this patch. The fix is relatively simple: in shrink_zone() we can check how many pages we have already freed, direct reclaim tasks break out of the scanning loop if they have already freed enough pages and have reached a lower priority level. We do not break out of shrink_zone() when priority == DEF_PRIORITY, to ensure that equal pressure is applied to every zone in the common case. However, in order to do this we do need to know how many pages we already freed, so move nr_reclaimed into scan_control. akpm: a historical interlude... We tried this in 2004: :commit e468e46a9bea3297011d5918663ce6d19094cf87 :Author: akpm :Date: Thu Jun 24 15:53:52 2004 +0000 : :[PATCH] vmscan.c: dont reclaim too many pages : : The shrink_zone() logic can, under some circumstances, cause far too many : pages to be reclaimed. Say, we're scanning at high priority and suddenly hit : a large number of reclaimable pages on the LRU. : Change things so we bale out when SWAP_CLUSTER_MAX pages have been reclaimed. And we reverted it in 2006: :commit 210fe530305ee50cd889fe9250168228b2994f32 :Author: Andrew Morton :Date: Fri Jan 6 00:11:14 2006 -0800 : : [PATCH] vmscan: balancing fix : : Revert a patch which went into 2.6.8-rc1. The changelog for that patch was: : : The shrink_zone() logic can, under some circumstances, cause far too many : pages to be reclaimed. Say, we're scanning at high priority and suddenly : hit a large number of reclaimable pages on the LRU. : : Change things so we bale out when SWAP_CLUSTER_MAX pages have been : reclaimed. : : Problem is, this change caused significant imbalance in inter-zone scan : balancing by truncating scans of larger zones. : : Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL. The zone : balancing algorithm would require that if we're scanning 100 pages of : ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL. But this logic will : cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are : reclaimed. Thus effectively causing smaller zones to be scanned relatively : harder than large ones. : : Now I need to remember what the workload was which caused me to write this : patch originally, then fix it up in a different way... And we haven't demonstrated that whatever problem caused that reversion is not being reintroduced by this change in 2008. Signed-off-by: Rik van Riel Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 62 ++++++++++++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 1ef5a2eeb298..5faa7739487f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -52,6 +52,9 @@ struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; + /* Number of pages freed so far during a call to shrink_zones() */ + unsigned long nr_reclaimed; + /* This context's GFP mask */ gfp_t gfp_mask; @@ -1400,12 +1403,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
*/ -static unsigned long shrink_zone(int priority, struct zone *zone, +static void shrink_zone(int priority, struct zone *zone, struct scan_control *sc) { unsigned long nr[NR_LRU_LISTS]; unsigned long nr_to_scan; - unsigned long nr_reclaimed = 0; unsigned long percent[2]; /* anon @ 0; file @ 1 */ enum lru_list l; @@ -1446,10 +1448,21 @@ static unsigned long shrink_zone(int priority, struct zone *zone, (unsigned long)sc->swap_cluster_max); nr[l] -= nr_to_scan; - nr_reclaimed += shrink_list(l, nr_to_scan, + sc->nr_reclaimed += shrink_list(l, nr_to_scan, zone, sc, priority); } } + /* + * On large memory systems, scan >> priority can become + * really large. This is fine for the starting priority; + * we want to put equal scanning pressure on each zone. + * However, if the VM has a harder time of freeing pages, + * with multiple processes reclaiming pages, the total + * freeing target can get unreasonably large. + */ + if (sc->nr_reclaimed > sc->swap_cluster_max && + priority < DEF_PRIORITY && !current_is_kswapd()) + break; } /* @@ -1462,7 +1475,6 @@ static unsigned long shrink_zone(int priority, struct zone *zone, shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); throttle_vm_writeout(sc->gfp_mask); - return nr_reclaimed; } /* @@ -1476,16 +1488,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone, * b) The zones may be over pages_high but they must go *over* pages_high to * satisfy the `incremental min' zone defense algorithm. * - * Returns the number of reclaimed pages. - * * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ -static unsigned long shrink_zones(int priority, struct zonelist *zonelist, +static void shrink_zones(int priority, struct zonelist *zonelist, struct scan_control *sc) { enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); - unsigned long nr_reclaimed = 0; struct zoneref *z; struct zone *zone; @@ -1516,10 +1525,8 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist, priority); } - nr_reclaimed += shrink_zone(priority, zone, sc); + shrink_zone(priority, zone, sc); } - - return nr_reclaimed; } /* @@ -1544,7 +1551,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, int priority; unsigned long ret = 0; unsigned long total_scanned = 0; - unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; struct zoneref *z; @@ -1572,7 +1578,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, sc->nr_scanned = 0; if (!priority) disable_swap_token(); - nr_reclaimed += shrink_zones(priority, zonelist, sc); + shrink_zones(priority, zonelist, sc); /* * Don't shrink slabs when reclaiming memory from * over limit cgroups @@ -1580,13 +1586,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, if (scan_global_lru(sc)) { shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); if (reclaim_state) { - nr_reclaimed += reclaim_state->reclaimed_slab; + sc->nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } } total_scanned += sc->nr_scanned; - if (nr_reclaimed >= sc->swap_cluster_max) { - ret = nr_reclaimed; + if (sc->nr_reclaimed >= sc->swap_cluster_max) { + ret = sc->nr_reclaimed; goto out; } @@ -1609,7 +1615,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, } /* top priority shrink_zones still had more to do? 
don't OOM, then */ if (!sc->all_unreclaimable && scan_global_lru(sc)) - ret = nr_reclaimed; + ret = sc->nr_reclaimed; out: /* * Now that we've scanned all the zones at this priority level, note @@ -1704,7 +1710,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) int priority; int i; unsigned long total_scanned; - unsigned long nr_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, @@ -1723,7 +1728,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) loop_again: total_scanned = 0; - nr_reclaimed = 0; + sc.nr_reclaimed = 0; sc.may_writepage = !laptop_mode; count_vm_event(PAGEOUTRUN); @@ -1809,11 +1814,11 @@ loop_again: */ if (!zone_watermark_ok(zone, order, 8*zone->pages_high, end_zone, 0)) - nr_reclaimed += shrink_zone(priority, zone, &sc); + shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); - nr_reclaimed += reclaim_state->reclaimed_slab; + sc.nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; if (zone_is_all_unreclaimable(zone)) continue; @@ -1827,7 +1832,7 @@ loop_again: * even in laptop mode */ if (total_scanned > SWAP_CLUSTER_MAX * 2 && - total_scanned > nr_reclaimed + nr_reclaimed / 2) + total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) sc.may_writepage = 1; } if (all_zones_ok) @@ -1845,7 +1850,7 @@ loop_again: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ - if (nr_reclaimed >= SWAP_CLUSTER_MAX) + if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) break; } out: @@ -1867,7 +1872,7 @@ out: goto loop_again; } - return nr_reclaimed; + return sc.nr_reclaimed; } /* @@ -2219,7 +2224,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) struct task_struct *p = current; struct reclaim_state reclaim_state; int priority; - unsigned long nr_reclaimed = 0; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), @@ -2252,9 +2256,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) priority = ZONE_RECLAIM_PRIORITY; do { note_zone_scanning_priority(zone, priority); - nr_reclaimed += shrink_zone(priority, zone, &sc); + shrink_zone(priority, zone, &sc); priority--; - } while (priority >= 0 && nr_reclaimed < nr_pages); + } while (priority >= 0 && sc.nr_reclaimed < nr_pages); } slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); @@ -2278,13 +2282,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) * Update nr_reclaimed by the number of slab pages we * reclaimed from this zone. */ - nr_reclaimed += slab_reclaimable - + sc.nr_reclaimed += slab_reclaimable - zone_page_state(zone, NR_SLAB_RECLAIMABLE); } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); - return nr_reclaimed >= nr_pages; + return sc.nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) -- cgit v1.2.2 From 01dbe5c9b1004dab045cb7f38428258ca9cddc02 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:40:02 -0800 Subject: vmscan: improve reclaim throughput to bail out patch The vmscan bail-out patch moved the nr_reclaimed variable into struct scan_control. Unfortunately, the indirect access can easily cause cache misses. If memory pressure is heavy, that is fine: cache misses are already plentiful, so the cost is not observable.
But if memory pressure is light, the performance degradation is observable. I compared the following three patterns (each was measured 10 times): hackbench 125 process 3000, hackbench 130 process 3000, hackbench 135 process 3000.

             2.6.28-rc6                  bail-out
        125     130     135         125     130     135
==============================================================
     71.866  75.86   81.274      93.414  73.254 193.382
     74.145  78.295  77.27       74.897  75.021  80.17
     70.305  77.643  75.855      70.134  77.571  79.896
     74.288  73.986  75.955      77.222  78.48   80.619
     72.029  79.947  78.312      75.128  82.172  79.708
     71.499  77.615  77.042      74.177  76.532  77.306
     76.188  74.471  83.562      73.839  72.43   79.833
     73.236  75.606  78.743      76.001  76.557  82.726
     69.427  77.271  76.691      76.236  79.371 103.189
     72.473  76.978  80.643      69.128  78.932  75.736
avg  72.545  76.767  78.534      76.017  77.03   93.256
std   1.89    1.71    2.41        6.29    2.79   34.16
min  69.427  73.986  75.855      69.128  72.43   75.736
max  76.188  79.947  83.562      93.414  82.172 193.382

That is about a 4-5% degradation. This patch therefore introduces a temporary local variable. Result:

             2.6.28-rc6                 this patch
num     125     130     135         125     130     135
==============================================================
     71.866  75.86   81.274      67.302  68.269  77.161
     74.145  78.295  77.27       72.616  72.712  79.06
     70.305  77.643  75.855      72.475  75.712  77.735
     74.288  73.986  75.955      69.229  73.062  78.814
     72.029  79.947  78.312      71.551  74.392  78.564
     71.499  77.615  77.042      69.227  74.31   78.837
     76.188  74.471  83.562      70.759  75.256  76.6
     73.236  75.606  78.743      69.966  76.001  78.464
     69.427  77.271  76.691      69.068  75.218  80.321
     72.473  76.978  80.643      72.057  77.151  79.068
avg  72.545  76.767  78.534      70.425  74.2083 78.462
std   1.89    1.71    2.41        1.66    2.34    1.00
min  69.427  73.986  75.855      67.302  68.269  76.6
max  76.188  79.947  83.562      72.616  77.151  80.321

OK, the degradation has disappeared. Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5faa7739487f..13f050d667e9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1410,6 +1410,8 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long nr_to_scan; unsigned long percent[2]; /* anon @ 0; file @ 1 */ enum lru_list l; + unsigned long nr_reclaimed = sc->nr_reclaimed; + unsigned long swap_cluster_max = sc->swap_cluster_max; get_scan_ratio(zone, sc, percent); @@ -1425,7 +1427,7 @@ static void shrink_zone(int priority, struct zone *zone, } zone->lru[l].nr_scan += scan; nr[l] = zone->lru[l].nr_scan; - if (nr[l] >= sc->swap_cluster_max) + if (nr[l] >= swap_cluster_max) zone->lru[l].nr_scan = 0; else nr[l] = 0; @@ -1444,12 +1446,11 @@ static void shrink_zone(int priority, struct zone *zone, nr[LRU_INACTIVE_FILE]) { for_each_evictable_lru(l) { if (nr[l]) { - nr_to_scan = min(nr[l], - (unsigned long)sc->swap_cluster_max); + nr_to_scan = min(nr[l], swap_cluster_max); nr[l] -= nr_to_scan; - sc->nr_reclaimed += shrink_list(l, nr_to_scan, - zone, sc, priority); + nr_reclaimed += shrink_list(l, nr_to_scan, + zone, sc, priority); } } /* * On large memory systems, scan >> priority can become * really large. This is fine for the starting priority; * we want to put equal scanning pressure on each zone. * However, if the VM has a harder time of freeing pages, * with multiple processes reclaiming pages, the total * freeing target can get unreasonably large.
*/ - if (sc->nr_reclaimed > sc->swap_cluster_max && + if (nr_reclaimed > swap_cluster_max && priority < DEF_PRIORITY && !current_is_kswapd()) break; } + sc->nr_reclaimed = nr_reclaimed; + /* * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. -- cgit v1.2.2 From 09f445e7f5107c91be12ed386350de6cd055e0a4 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:40:03 -0800 Subject: mm: kill zone_is_near_oom() zone_is_near_oom() is unused. Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 13f050d667e9..466a36b3bada 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1167,11 +1167,6 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority) zone->prev_priority = priority; } -static inline int zone_is_near_oom(struct zone *zone) -{ - return zone->pages_scanned >= (zone_lru_pages(zone) * 3); -} - /* * This moves pages from the active list to the inactive list. * -- cgit v1.2.2 From 79f4b7bf393e67bbffec807cc68caaefc72b82ee Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:05 -0800 Subject: badpage: simplify page_alloc flag check+clear Simplify the PAGE_FLAGS checking and clearing when freeing and allocating a page: check the same flags as before when freeing, clear ALL the flags (unless PageReserved) when freeing, check ALL flags off when allocating. Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 31c512410a99..b90a74d28485 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -231,7 +231,6 @@ static void bad_page(struct page *page) printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" KERN_EMERG "Backtrace:\n"); dump_stack(); - page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD; set_page_count(page, 0); reset_page_mapcount(page); page->mapping = NULL; @@ -468,16 +467,16 @@ static inline int free_pages_check(struct page *page) (page_count(page) != 0) | (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) bad_page(page); - if (PageDirty(page)) - __ClearPageDirty(page); - if (PageSwapBacked(page)) - __ClearPageSwapBacked(page); /* * For now, we report if PG_reserved was found set, but do not * clear it, and do not free the page. But we shall soon need * to do more, for when the ZERO_PAGE count wraps negative. 
*/ - return PageReserved(page); + if (PageReserved(page)) + return 1; + if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + return 0; } /* @@ -621,13 +620,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) if (PageReserved(page)) return 1; - page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim | - 1 << PG_referenced | 1 << PG_arch_1 | - 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk -#ifdef CONFIG_UNEVICTABLE_LRU - | 1 << PG_mlocked -#endif - ); + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; set_page_private(page, 0); set_page_refcounted(page); -- cgit v1.2.2 From 8cc3b39221b0ecbd83a338948a8396df097fc656 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:06 -0800 Subject: badpage: keep any bad page out of circulation Until now the bad_page() checkers have special-cased PageReserved, keeping those pages out of circulation thereafter. Now extend the special case to all: we want to keep ANY page with bad state out of circulation - the "free" page may well be in use by something. Leave the bad state of those pages untouched, for examination by debuggers; except for PageBuddy - leaving that set would risk bringing the page back. Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 52 ++++++++++++++++++++++++---------------------------- 1 file changed, 24 insertions(+), 28 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b90a74d28485..bd330252fc77 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -231,9 +231,9 @@ static void bad_page(struct page *page) printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" KERN_EMERG "Backtrace:\n"); dump_stack(); - set_page_count(page, 0); - reset_page_mapcount(page); - page->mapping = NULL; + + /* Leave bad fields for debug, except PageBuddy could make trouble */ + __ClearPageBuddy(page); add_taint(TAINT_BAD_PAGE); } @@ -290,25 +290,31 @@ void prep_compound_gigantic_page(struct page *page, unsigned long order) } #endif -static void destroy_compound_page(struct page *page, unsigned long order) +static int destroy_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; + int bad = 0; - if (unlikely(compound_order(page) != order)) + if (unlikely(compound_order(page) != order) || + unlikely(!PageHead(page))) { bad_page(page); + bad++; + } - if (unlikely(!PageHead(page))) - bad_page(page); __ClearPageHead(page); + for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - if (unlikely(!PageTail(p) | - (p->first_page != page))) + if (unlikely(!PageTail(p) | (p->first_page != page))) { bad_page(page); + bad++; + } __ClearPageTail(p); } + + return bad; } static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) @@ -428,7 +434,8 @@ static inline void __free_one_page(struct page *page, int migratetype = get_pageblock_migratetype(page); if (unlikely(PageCompound(page))) - destroy_compound_page(page, order); + if (unlikely(destroy_compound_page(page, order))) + return; page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); @@ -465,15 +472,10 @@ static inline int free_pages_check(struct page *page) if (unlikely(page_mapcount(page) | (page->mapping != NULL) | (page_count(page) != 0) | - (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) + (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { bad_page(page); - /* - * For now, we report if PG_reserved was found set, 
but do not - * clear it, and do not free the page. But we shall soon need - * to do more, for when the ZERO_PAGE count wraps negative. - */ - if (PageReserved(page)) return 1; + } if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; return 0; @@ -521,11 +523,11 @@ static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; int i; - int reserved = 0; + int bad = 0; for (i = 0 ; i < (1 << order) ; ++i) - reserved += free_pages_check(page + i); - if (reserved) + bad += free_pages_check(page + i); + if (bad) return; if (!PageHighMem(page)) { @@ -610,17 +612,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) if (unlikely(page_mapcount(page) | (page->mapping != NULL) | (page_count(page) != 0) | - (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) + (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) { bad_page(page); - - /* - * For now, we report if PG_reserved was found set, but do not - * clear it, and do not allocate the page: as a safety net. - */ - if (PageReserved(page)) return 1; + } - page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; set_page_private(page, 0); set_page_refcounted(page); -- cgit v1.2.2 From 3dc147414ccad81dc33edb80774b1fed12a38c08 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:08 -0800 Subject: badpage: replace page_remove_rmap Eeek and BUG Now that bad pages are kept out of circulation, there is no need for the infamous page_remove_rmap() BUG() - once that page is freed, its negative mapcount will issue a "Bad page state" message and the page won't be freed. Removing the BUG() allows more info, on subsequent pages, to be gathered. We do have more info about the page at this point than bad_page() can know - notably, what the pmd is, which might pinpoint something like low 64kB corruption - but page_remove_rmap() isn't given the address to find that. In practice, there is only one call to page_remove_rmap() which has ever reported anything, that from zap_pte_range() (usually on exit, sometimes on munmap). It has all the info, so remove page_remove_rmap()'s "Eeek" message and leave it all to zap_pte_range(). mm/memory.c already has a hardly used print_bad_pte() function, showing some of the appropriate info: extend it to show what we want for the rmap case: pte info, page info (when there is a page) and vma info to compare. zap_pte_range() already knows the pmd, but print_bad_pte() is easier to use if it works that out for itself. Some of this info is also shown in bad_page()'s "Bad page state" message. Keep them separate, but adjust them to match each other as far as possible. Say "Bad page map" in print_bad_pte(), and add a TAINT_BAD_PAGE there too. print_bad_pte() show current->comm unconditionally (though it should get repeated in the usually irrelevant stack trace): sorry, I misled Nick Piggin to make it conditional on vm_mm == current->mm, but current->mm is already NULL in the exit case. Usually current->comm is good, though exceptionally it may not be that of the mm (when "swapoff" for example). 
Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 52 ++++++++++++++++++++++++++++++++++++++++------------ mm/page_alloc.c | 16 ++++++++-------- mm/rmap.c | 16 ---------------- 3 files changed, 48 insertions(+), 36 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 89339c61f8e5..cda04b19f733 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -52,6 +52,9 @@ #include #include #include +#include +#include +#include #include #include @@ -59,9 +62,6 @@ #include #include -#include -#include - #include "internal.h" #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -375,15 +375,41 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) * * The calling function must still handle the error. */ -static void print_bad_pte(struct vm_area_struct *vma, pte_t pte, - unsigned long vaddr) -{ - printk(KERN_ERR "Bad pte = %08llx, process = %s, " - "vm_flags = %lx, vaddr = %lx\n", - (long long)pte_val(pte), - (vma->vm_mm == current->mm ? current->comm : "???"), - vma->vm_flags, vaddr); +static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, struct page *page) +{ + pgd_t *pgd = pgd_offset(vma->vm_mm, addr); + pud_t *pud = pud_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); + struct address_space *mapping; + pgoff_t index; + + mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; + index = linear_page_index(vma, addr); + + printk(KERN_EMERG "Bad page map in process %s pte:%08llx pmd:%08llx\n", + current->comm, + (long long)pte_val(pte), (long long)pmd_val(*pmd)); + if (page) { + printk(KERN_EMERG + "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", + page, (void *)page->flags, page_count(page), + page_mapcount(page), page->mapping, page->index); + } + printk(KERN_EMERG + "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", + (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); + /* + * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y + */ + if (vma->vm_ops) + print_symbol(KERN_EMERG "vma->vm_ops->fault: %s\n", + (unsigned long)vma->vm_ops->fault); + if (vma->vm_file && vma->vm_file->f_op) + print_symbol(KERN_EMERG "vma->vm_file->f_op->mmap: %s\n", + (unsigned long)vma->vm_file->f_op->mmap); dump_stack(); + add_taint(TAINT_BAD_PAGE); } static inline int is_cow_mapping(unsigned int flags) @@ -773,6 +799,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, file_rss--; } page_remove_rmap(page, vma); + if (unlikely(page_mapcount(page) < 0)) + print_bad_pte(vma, addr, ptent, page); tlb_remove_page(tlb, page); continue; } @@ -2684,7 +2712,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* * Page table corrupted: show pte and kill process. 
*/ - print_bad_pte(vma, orig_pte, address); + print_bad_pte(vma, address, orig_pte, NULL); return VM_FAULT_OOM; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bd330252fc77..3acb216e9a78 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -222,14 +222,14 @@ static inline int bad_range(struct zone *zone, struct page *page) static void bad_page(struct page *page) { - printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG - "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", - current->comm, page, (int)(2*sizeof(unsigned long)), - (unsigned long)page->flags, page->mapping, - page_mapcount(page), page_count(page)); - - printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" - KERN_EMERG "Backtrace:\n"); + printk(KERN_EMERG "Bad page state in process %s pfn:%05lx\n", + current->comm, page_to_pfn(page)); + printk(KERN_EMERG + "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", + page, (void *)page->flags, page_count(page), + page_mapcount(page), page->mapping, page->index); + printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"); + dump_stack(); /* Leave bad fields for debug, except PageBuddy could make trouble */ diff --git a/mm/rmap.c b/mm/rmap.c index b1770b11a571..32098255082e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -725,21 +724,6 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long void page_remove_rmap(struct page *page, struct vm_area_struct *vma) { if (atomic_add_negative(-1, &page->_mapcount)) { - if (unlikely(page_mapcount(page) < 0)) { - printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page)); - printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page)); - printk (KERN_EMERG " page->flags = %lx\n", page->flags); - printk (KERN_EMERG " page->count = %x\n", page_count(page)); - printk (KERN_EMERG " page->mapping = %p\n", page->mapping); - print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops); - if (vma->vm_ops) { - print_symbol (KERN_EMERG " vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault); - } - if (vma->vm_file && vma->vm_file->f_op) - print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap); - BUG(); - } - /* * Now that the last pte has gone, s390 must transfer dirty * flag from storage key to struct page. We can usually skip -- cgit v1.2.2 From 22b31eec63e5f2e219a3ee15f456897272bc73e8 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:09 -0800 Subject: badpage: vm_normal_page use print_bad_pte print_bad_pte() is so far being called only when zap_pte_range() finds negative page_mapcount, or there's a fault on a pte_file where it does not belong. That's weak coverage when we suspect pagetable corruption. Originally, it was called when vm_normal_page() found an invalid pfn: but pfn_valid is expensive on some architectures and configurations, so 2.6.24 put that under CONFIG_DEBUG_VM (which doesn't help in the field), then 2.6.26 replaced it by a VM_BUG_ON (likewise). Reinstate the print_bad_pte() in vm_normal_page(), but use a cheaper test than pfn_valid(): memmap_init_zone() (used in bootup and hotplug) keeps a __read_mostly note of the highest_memmap_pfn, and vm_normal_page() then checks the pfn against that. We could call this pfn_plausible() or pfn_sane(), but I doubt we'll need it elsewhere: of course it's not reliable, but gives much stronger pagetable validation on many boxes.
Also use print_bad_pte() when the pte_special bit is found outside a VM_PFNMAP or VM_MIXEDMAP area, instead of VM_BUG_ON. Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/internal.h | 1 + mm/memory.c | 20 ++++++++++---------- mm/page_alloc.c | 4 ++++ 3 files changed, 15 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 13333bc2eb68..1981bc9454f3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page); /* * in mm/page_alloc.c */ +extern unsigned long highest_memmap_pfn; extern void __free_pages_bootmem(struct page *page, unsigned int order); /* diff --git a/mm/memory.c b/mm/memory.c index cda04b19f733..890095f5f36d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -467,21 +467,18 @@ static inline int is_cow_mapping(unsigned int flags) struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { - unsigned long pfn; + unsigned long pfn = pte_pfn(pte); if (HAVE_PTE_SPECIAL) { - if (likely(!pte_special(pte))) { - VM_BUG_ON(!pfn_valid(pte_pfn(pte))); - return pte_page(pte); - } - VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); + if (likely(!pte_special(pte))) + goto check_pfn; + if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) + print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !HAVE_PTE_SPECIAL case follows: */ - pfn = pte_pfn(pte); - if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) @@ -497,11 +494,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, } } - VM_BUG_ON(!pfn_valid(pfn)); +check_pfn: + if (unlikely(pfn > highest_memmap_pfn)) { + print_bad_pte(vma, addr, pte, NULL); + return NULL; + } /* * NOTE! We still have PageReserved() pages in the page tables. - * * eg. VDSO mappings can cause them to exist. */ out: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3acb216e9a78..755c99a0ac71 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -69,6 +69,7 @@ EXPORT_SYMBOL(node_states); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; +unsigned long highest_memmap_pfn __read_mostly; int percpu_pagelist_fraction; #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -2597,6 +2598,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long pfn; struct zone *z; + if (highest_memmap_pfn < end_pfn - 1) + highest_memmap_pfn = end_pfn - 1; + z = &NODE_DATA(nid)->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* -- cgit v1.2.2 From 2509ef26db4699a5d9fa876e90ddfc107afcab84 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:10 -0800 Subject: badpage: zap print_bad_pte on swap and file Complete zap_pte_range()'s coverage of bad pagetable entries by calling print_bad_pte() on a pte_file in a linear vma and on a bad swap entry. That needs free_swap_and_cache() to tell it, which will also have shown one of those "swap_free" errors (but with much less information). Similar checks in fork's copy_one_pte()? No, that would be more noisy than helpful: we'll see them when parent and child exec or exit. Where do_nonlinear_fault() calls print_bad_pte(): omit !VM_CAN_NONLINEAR case, that could only be a bug in sys_remap_file_pages(), not a bad pte. VM_FAULT_OOM rather than VM_FAULT_SIGBUS? 
Well, okay, that is consistent with what happens if do_swap_page() operates a bad swap entry; but don't we have patches to be more careful about killing when VM_FAULT_OOM? Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 11 +++++++---- mm/swapfile.c | 7 ++++--- 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 890095f5f36d..b273cc12b15d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -810,8 +810,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, */ if (unlikely(details)) continue; - if (!pte_file(ptent)) - free_swap_and_cache(pte_to_swp_entry(ptent)); + if (pte_file(ptent)) { + if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) + print_bad_pte(vma, addr, ptent, NULL); + } else if + (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent)))) + print_bad_pte(vma, addr, ptent, NULL); pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); @@ -2707,8 +2711,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) return 0; - if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || - !(vma->vm_flags & VM_CAN_NONLINEAR))) { + if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { /* * Page table corrupted: show pte and kill process. */ diff --git a/mm/swapfile.c b/mm/swapfile.c index d00523601913..f28745855772 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -571,13 +571,13 @@ int try_to_free_swap(struct page *page) * Free the swap entry like above, but also try to * free the page cache entry if it is the last user. */ -void free_swap_and_cache(swp_entry_t entry) +int free_swap_and_cache(swp_entry_t entry) { - struct swap_info_struct * p; + struct swap_info_struct *p; struct page *page = NULL; if (is_migration_entry(entry)) - return; + return 1; p = swap_info_get(entry); if (p) { @@ -603,6 +603,7 @@ void free_swap_and_cache(swp_entry_t entry) unlock_page(page); page_cache_release(page); } + return p != NULL; } #ifdef CONFIG_HIBERNATION -- cgit v1.2.2 From edc315fd222497ae4f4b959a9e31ada1e68a4755 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:11 -0800 Subject: badpage: remove vma from page_remove_rmap Remove page_remove_rmap()'s vma arg, which was only for the Eeek message. And remove the BUG_ON(page_mapcount(page) == 0) from CONFIG_DEBUG_VM's page_dup_rmap(): we're trying to be more resilient about that than BUGs. Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap_xip.c | 2 +- mm/fremap.c | 2 +- mm/memory.c | 4 ++-- mm/rmap.c | 8 +++----- 4 files changed, 7 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index b5167dfb2f2d..0c04615651b7 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -193,7 +193,7 @@ retry: /* Nuke the page table entry. 
*/ flush_cache_page(vma, address, pte_pfn(*pte)); pteval = ptep_clear_flush_notify(vma, address, pte); - page_remove_rmap(page, vma); + page_remove_rmap(page); dec_mm_counter(mm, file_rss); BUG_ON(pte_dirty(pteval)); pte_unmap_unlock(pte, ptl); diff --git a/mm/fremap.c b/mm/fremap.c index 7d12ca70ef7b..62d5bbda921a 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, if (page) { if (pte_dirty(pte)) set_page_dirty(page); - page_remove_rmap(page, vma); + page_remove_rmap(page); page_cache_release(page); update_hiwater_rss(mm); dec_mm_counter(mm, file_rss); diff --git a/mm/memory.c b/mm/memory.c index b273cc12b15d..0f9abbaf618c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -798,7 +798,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, mark_page_accessed(page); file_rss--; } - page_remove_rmap(page, vma); + page_remove_rmap(page); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); tlb_remove_page(tlb, page); @@ -2023,7 +2023,7 @@ gotten: * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ - page_remove_rmap(old_page, vma); + page_remove_rmap(old_page); } /* Free the old page.. */ diff --git a/mm/rmap.c b/mm/rmap.c index 32098255082e..ac4af8cffbf9 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -707,7 +707,6 @@ void page_add_file_rmap(struct page *page) */ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - BUG_ON(page_mapcount(page) == 0); if (PageAnon(page)) __page_check_anon_rmap(page, vma, address); atomic_inc(&page->_mapcount); @@ -717,11 +716,10 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long /** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from - * @vma: the vm area in which the mapping is removed * * The caller needs to hold the pte lock. */ -void page_remove_rmap(struct page *page, struct vm_area_struct *vma) +void page_remove_rmap(struct page *page) { if (atomic_add_negative(-1, &page->_mapcount)) { /* @@ -837,7 +835,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, dec_mm_counter(mm, file_rss); - page_remove_rmap(page, vma); + page_remove_rmap(page); page_cache_release(page); out_unmap: @@ -952,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, if (pte_dirty(pteval)) set_page_dirty(page); - page_remove_rmap(page, vma); + page_remove_rmap(page); page_cache_release(page); dec_mm_counter(mm, file_rss); (*mapcount)--; -- cgit v1.2.2 From d936cf9b39b06c8d2e0d7fb5e7b4f176e18dec69 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:12 -0800 Subject: badpage: ratelimit print_bad_pte and bad_page print_bad_pte() and bad_page() might each need ratelimiting - especially for their dump_stacks, almost never of interest, yet not quite dispensable. Correlating corruption across neighbouring entries can be very helpful, so allow a burst of 60 reports before keeping quiet for the remainder of that minute (or allow a steady drip of one report per second).
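The burst-then-drip policy is easiest to see in isolation. Here is a minimal standalone sketch of the pattern (the helper name and message text are illustrative only; the real hunks follow below):

	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Return 1 if this report should be swallowed. */
	static int report_suppressed(void)
	{
		if (nr_shown == 60) {
			if (time_before(jiffies, resume)) {
				/* still in the quiet remainder of the minute */
				nr_unshown++;
				return 1;
			}
			if (nr_unshown) {
				printk(KERN_EMERG "%lu messages suppressed\n",
					nr_unshown);
				nr_unshown = 0;
			}
			nr_shown = 0;
		}
		if (nr_shown++ == 0)
			resume = jiffies + 60 * HZ;	/* first of a burst starts the clock */
		return 0;
	}

If reports arrive no faster than one per second, the 60-report quota is never exhausted before resume passes, which is what yields the steady drip.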
Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 23 +++++++++++++++++++++++ mm/page_alloc.c | 26 +++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 0f9abbaf618c..b12888c1b4e3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -383,6 +383,29 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; + static unsigned long resume; + static unsigned long nr_shown; + static unsigned long nr_unshown; + + /* + * Allow a burst of 60 reports, then keep quiet for that minute; + * or allow a steady drip of one report per second. + */ + if (nr_shown == 60) { + if (time_before(jiffies, resume)) { + nr_unshown++; + return; + } + if (nr_unshown) { + printk(KERN_EMERG + "Bad page map: %lu messages suppressed\n", + nr_unshown); + nr_unshown = 0; + } + nr_shown = 0; + } + if (nr_shown++ == 0) + resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 755c99a0ac71..d1a80f652c6f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -223,6 +223,30 @@ static inline int bad_range(struct zone *zone, struct page *page) static void bad_page(struct page *page) { + static unsigned long resume; + static unsigned long nr_shown; + static unsigned long nr_unshown; + + /* + * Allow a burst of 60 reports, then keep quiet for that minute; + * or allow a steady drip of one report per second. + */ + if (nr_shown == 60) { + if (time_before(jiffies, resume)) { + nr_unshown++; + goto out; + } + if (nr_unshown) { + printk(KERN_EMERG + "Bad page state: %lu messages suppressed\n", + nr_unshown); + nr_unshown = 0; + } + nr_shown = 0; + } + if (nr_shown++ == 0) + resume = jiffies + 60 * HZ; + printk(KERN_EMERG "Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); printk(KERN_EMERG @@ -232,7 +256,7 @@ static void bad_page(struct page *page) printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"); dump_stack(); - +out: /* Leave bad fields for debug, except PageBuddy could make trouble */ __ClearPageBuddy(page); add_taint(TAINT_BAD_PAGE); -- cgit v1.2.2 From 1e9e63650d6cb88e6d6d2ca6cc3ee276c26de4a3 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 6 Jan 2009 14:40:13 -0800 Subject: badpage: KERN_ALERT BUG instead of KERN_EMERG bad_page() and rmap Eeek messages have said KERN_EMERG for a few years, which I've followed in print_bad_pte(). These are serious system errors, on a par with BUGs, but they're not quite emergencies, and we do our best to carry on: say KERN_ALERT "BUG: " like the x86 oops does. And remove the "Trying to fix it up, but a reboot is needed" line: it's not untrue, but I hope the KERN_ALERT "BUG: " conveys as much. 
Signed-off-by: Hugh Dickins Cc: Nick Piggin Cc: Christoph Lameter Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 15 ++++++++------- mm/page_alloc.c | 9 ++++----- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index b12888c1b4e3..db68af8e0bc4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -397,8 +397,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, return; } if (nr_unshown) { - printk(KERN_EMERG - "Bad page map: %lu messages suppressed\n", + printk(KERN_ALERT + "BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } @@ -410,26 +410,27 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); - printk(KERN_EMERG "Bad page map in process %s pte:%08llx pmd:%08llx\n", + printk(KERN_ALERT + "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) { - printk(KERN_EMERG + printk(KERN_ALERT "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", page, (void *)page->flags, page_count(page), page_mapcount(page), page->mapping, page->index); } - printk(KERN_EMERG + printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ if (vma->vm_ops) - print_symbol(KERN_EMERG "vma->vm_ops->fault: %s\n", + print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n", (unsigned long)vma->vm_ops->fault); if (vma->vm_file && vma->vm_file->f_op) - print_symbol(KERN_EMERG "vma->vm_file->f_op->mmap: %s\n", + print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n", (unsigned long)vma->vm_file->f_op->mmap); dump_stack(); add_taint(TAINT_BAD_PAGE); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d1a80f652c6f..d531e8ef9984 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -237,8 +237,8 @@ static void bad_page(struct page *page) goto out; } if (nr_unshown) { - printk(KERN_EMERG - "Bad page state: %lu messages suppressed\n", + printk(KERN_ALERT + "BUG: Bad page state: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } @@ -247,13 +247,12 @@ static void bad_page(struct page *page) if (nr_shown++ == 0) resume = jiffies + 60 * HZ; - printk(KERN_EMERG "Bad page state in process %s pfn:%05lx\n", + printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); - printk(KERN_EMERG + printk(KERN_ALERT "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", page, (void *)page->flags, page_count(page), page_mapcount(page), page->mapping, page->index); - printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"); dump_stack(); out: -- cgit v1.2.2 From b555749aac87d7c2637f153e44bd77c7fdf4c65b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 6 Jan 2009 14:40:13 -0800 Subject: vmscan: shrink_active_list(): reduce lru_lock hold time These three statements manipulate local variables and do not need the lock coverage. 
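Schematically, the change hoists pure local-variable setup out of the locked region (a before/after sketch of the pattern, simplified from the hunk below):

	/* before: these run with zone->lru_lock held for no reason */
	spin_lock_irq(&zone->lru_lock);
	pagevec_init(&pvec, 1);
	pgmoved = 0;
	lru = LRU_BASE + file * LRU_FILE;

	/* after: set up the locals first, then take the lock */
	pagevec_init(&pvec, 1);
	pgmoved = 0;
	lru = LRU_BASE + file * LRU_FILE;
	spin_lock_irq(&zone->lru_lock);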
Cc: Johannes Weiner Cc: Lee Schermerhorn Cc: Rik van Riel Signed-off-by: Linus Torvalds --- mm/vmscan.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 466a36b3bada..5daf606e0a35 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1237,6 +1237,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, list_add(&page->lru, &l_inactive); } + /* + * Move the pages to the [file or anon] inactive list. + */ + pagevec_init(&pvec, 1); + pgmoved = 0; + lru = LRU_BASE + file * LRU_FILE; + spin_lock_irq(&zone->lru_lock); /* * Count referenced pages from currently used mappings as @@ -1247,13 +1254,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, if (scan_global_lru(sc)) zone->recent_rotated[!!file] += pgmoved; - /* - * Move the pages to the [file or anon] inactive list. - */ - pagevec_init(&pvec, 1); - - pgmoved = 0; - lru = LRU_BASE + file * LRU_FILE; while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); prefetchw_prev_lru_page(page, &l_inactive, flags); -- cgit v1.2.2 From 4779280d1ea4d361af13ae77ba55217fbcd16d4c Mon Sep 17 00:00:00 2001 From: Ying Han Date: Tue, 6 Jan 2009 14:40:18 -0800 Subject: mm: make get_user_pages() interruptible The initial implementation of checking TIF_MEMDIE covers the cases of OOM killing. If the process has been OOM killed, TIF_MEMDIE is set and the call returns immediately. This patch includes: 1. Handle the case where the SIGKILL is sent by a user process. The process can try to get_user_pages() unlimited memory even if a user process has sent a SIGKILL to it (for example, a monitor may find that the process exceeded its memory limit and try to kill it). In the old implementation, the SIGKILL is not handled until get_user_pages() returns. 2. Change the return value to ERESTARTSYS. It makes no sense to return ENOMEM when get_user_pages() returns early because of a SIGKILL. Since the general convention for a system call interrupted by a signal is to return ERESTARTSYS, the new return value is consistent with that. Lee: An unfortunate side effect of "make-get_user_pages-interruptible" is that it prevents a SIGKILL'd task from munlock-ing pages that it had mlocked, resulting in freeing of mlocked pages. Freeing of mlocked pages, in itself, is not so bad. We just count them now--although I had hoped to remove this stat and add PG_MLOCKED to the free pages flags check. However, consider pages in shared libraries mapped by more than one task that a task mlocked--e.g., via mlockall(). If the task that mlocked the pages exits via SIGKILL, these pages would be left mlocked and unevictable. Proposed fix: Add another GUP flag to ignore sigkill when calling get_user_pages from munlock()--similar to KOSAKI Motohiro's GUP_FLAGS_IGNORE_VMA_PERMISSIONS flag for the same purpose. We are not actually allocating memory in this case, which "make-get_user_pages-interruptible" intends to avoid. We're just munlocking pages that are already resident and mapped, and we're reusing get_user_pages() to access those pages. Maybe we should combine GUP_FLAGS_IGNORE_VMA_PERMISSIONS and GUP_FLAGS_IGNORE_SIGKILL into a single flag, GUP_FLAGS_MUNLOCK?
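The heart of the change is a fatal-signal check at the top of __get_user_pages()'s per-page loop; schematically (condensed from the hunk below):

	if (unlikely(!ignore_sigkill && fatal_signal_pending(current)))
		/* pages faulted so far, else a restartable error */
		return i ? i : -ERESTARTSYS;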
[Lee.Schermerhorn@hp.com: ignore sigkill in get_user_pages during munlock] Signed-off-by: Paul Menage Signed-off-by: Ying Han Reviewed-by: KOSAKI Motohiro Reviewed-by: Pekka Enberg Cc: Nick Piggin Cc: Hugh Dickins Cc: Oleg Nesterov Cc: Lee Schermerhorn Cc: Rohit Seth Cc: David Rientjes Signed-off-by: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/internal.h | 1 + mm/memory.c | 14 +++++++++----- mm/mlock.c | 9 +++++---- 3 files changed, 15 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 1981bc9454f3..478223b73a2a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -276,6 +276,7 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, #define GUP_FLAGS_WRITE 0x1 #define GUP_FLAGS_FORCE 0x2 #define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4 +#define GUP_FLAGS_IGNORE_SIGKILL 0x8 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int flags, diff --git a/mm/memory.c b/mm/memory.c index db68af8e0bc4..3f8fa06b963b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1210,6 +1210,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int write = !!(flags & GUP_FLAGS_WRITE); int force = !!(flags & GUP_FLAGS_FORCE); int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS); + int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL); if (len <= 0) return 0; @@ -1288,12 +1289,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, struct page *page; /* - * If tsk is ooming, cut off its access to large memory - * allocations. It has a pending SIGKILL, but it can't - * be processed until returning to user space. + * If we have a pending SIGKILL, don't keep faulting + * pages and potentially allocating memory, unless + * current is handling munlock--e.g., on exit. In + * that case, we are not allocating memory. Rather, + * we're only unlocking already resident/mapped pages. */ - if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE))) - return i ? i : -ENOMEM; + if (unlikely(!ignore_sigkill && + fatal_signal_pending(current))) + return i ? i : -ERESTARTSYS; if (write) foll_flags |= FOLL_WRITE; diff --git a/mm/mlock.c b/mm/mlock.c index 3035a56e7616..e125156c664e 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -173,12 +173,13 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, (atomic_read(&mm->mm_users) != 0)); /* - * mlock: don't page populate if page has PROT_NONE permission. - * munlock: the pages always do munlock althrough - * its has PROT_NONE permission. + * mlock: don't page populate if vma has PROT_NONE permission. + * munlock: always do munlock although the vma has PROT_NONE + * permission, or SIGKILL is pending. */ if (!mlock) - gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS; + gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS | + GUP_FLAGS_IGNORE_SIGKILL; if (vma->vm_flags & VM_WRITE) gup_flags |= GUP_FLAGS_WRITE; -- cgit v1.2.2 From 853ac43ab194f5051b27a55060215d696dc9480d Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Tue, 6 Jan 2009 14:40:20 -0800 Subject: shmem: unify regular and tiny shmem tiny-shmem shares most of its 130 lines of code with shmem and tends to break when particular bits of shmem get modified. Unifying saves code and makes keeping these two in sync much easier. 
before: 14367 392 24 14783 39bf mm/shmem.o 396 72 8 476 1dc mm/tiny-shmem.o after: 14367 392 24 14783 39bf mm/shmem.o 412 72 8 492 1ec mm/shmem.o tiny Signed-off-by: Matt Mackall Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Makefile | 4 +- mm/shmem.c | 81 +++++++++++++++++++++++++++++----- mm/tiny-shmem.c | 134 -------------------------------------------------------- 3 files changed, 72 insertions(+), 147 deletions(-) delete mode 100644 mm/tiny-shmem.c (limited to 'mm') diff --git a/mm/Makefile b/mm/Makefile index 51c27709cc7c..72255be57f89 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o pdflush.o \ - readahead.o swap.o truncate.o vmscan.o \ + readahead.o swap.o truncate.o vmscan.o shmem.o \ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ page_isolation.o mm_init.o $(mmu-y) @@ -21,9 +21,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o -obj-$(CONFIG_SHMEM) += shmem.o obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o -obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_SLAB) += slab.o diff --git a/mm/shmem.c b/mm/shmem.c index 24f18fdee6e3..5941f9801363 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -14,31 +14,39 @@ * Copyright (c) 2004, Luke Kenneth Casson Leighton * Copyright (c) 2004 Red Hat, Inc., James Morris * + * tiny-shmem: + * Copyright (c) 2004, 2008 Matt Mackall + * * This file is released under the GPL. */ +#include +#include +#include +#include +#include +#include +#include +#include + +static struct vfsmount *shm_mnt; + +#ifdef CONFIG_SHMEM /* * This virtual memory filesystem is heavily based on the ramfs. It * extends ramfs by the ability to use swap and honor resource limits * which makes it a completely usable filesystem. */ -#include -#include -#include #include #include #include -#include #include -#include -#include #include #include #include #include #include -#include #include #include #include @@ -2485,7 +2493,6 @@ static struct file_system_type tmpfs_fs_type = { .get_sb = shmem_get_sb, .kill_sb = kill_litter_super, }; -static struct vfsmount *shm_mnt; static int __init init_tmpfs(void) { @@ -2524,7 +2531,51 @@ out4: shm_mnt = ERR_PTR(error); return error; } -module_init(init_tmpfs) + +#else /* !CONFIG_SHMEM */ + +/* + * tiny-shmem: simple shmemfs and tmpfs using ramfs code + * + * This is intended for small system where the benefits of the full + * shmem code (swap-backed and resource-limited) are outweighed by + * their complexity. On systems without swap this code should be + * effectively equivalent, but much lighter weight. 
+ */ + +#include + +static struct file_system_type tmpfs_fs_type = { + .name = "tmpfs", + .get_sb = ramfs_get_sb, + .kill_sb = kill_litter_super, +}; + +static int __init init_tmpfs(void) +{ + BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); + + shm_mnt = kern_mount(&tmpfs_fs_type); + BUG_ON(IS_ERR(shm_mnt)); + + return 0; +} + +int shmem_unuse(swp_entry_t entry, struct page *page) +{ + return 0; +} + +#define shmem_file_operations ramfs_file_operations +#define shmem_vm_ops generic_file_vm_ops +#define shmem_get_inode ramfs_get_inode +#define shmem_acct_size(a, b) 0 +#define shmem_unacct_size(a, b) do {} while (0) +#define SHMEM_MAX_BYTES LLONG_MAX + +#endif /* CONFIG_SHMEM */ + +/* common code */ /** * shmem_file_setup - get an unlinked file living in tmpfs @@ -2568,12 +2619,20 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) if (!inode) goto close_file; +#ifdef CONFIG_SHMEM SHMEM_I(inode)->flags = flags & VM_ACCOUNT; +#endif d_instantiate(dentry, inode); inode->i_size = size; inode->i_nlink = 0; /* It is unlinked */ init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, - &shmem_file_operations); + &shmem_file_operations); + +#ifndef CONFIG_MMU + error = ramfs_nommu_expand_for_mapping(inode, size); + if (error) + goto close_file; +#endif return file; close_file: @@ -2605,3 +2664,5 @@ int shmem_zero_setup(struct vm_area_struct *vma) vma->vm_ops = &shmem_vm_ops; return 0; } + +module_init(init_tmpfs) diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c deleted file mode 100644 index 3e67d575ee6e..000000000000 --- a/mm/tiny-shmem.c +++ /dev/null @@ -1,134 +0,0 @@ -/* - * tiny-shmem.c: simple shmemfs and tmpfs using ramfs code - * - * Matt Mackall January, 2004 - * derived from mm/shmem.c and fs/ramfs/inode.c - * - * This is intended for small system where the benefits of the full - * shmem code (swap-backed and resource-limited) are outweighed by - * their complexity. On systems without swap this code should be - * effectively equivalent, but much lighter weight. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct file_system_type tmpfs_fs_type = { - .name = "tmpfs", - .get_sb = ramfs_get_sb, - .kill_sb = kill_litter_super, -}; - -static struct vfsmount *shm_mnt; - -static int __init init_tmpfs(void) -{ - BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); - - shm_mnt = kern_mount(&tmpfs_fs_type); - BUG_ON(IS_ERR(shm_mnt)); - - return 0; -} -module_init(init_tmpfs) - -/** - * shmem_file_setup - get an unlinked file living in tmpfs - * @name: name for dentry (to be seen in /proc//maps - * @size: size to be set for the file - * @flags: vm_flags - */ -struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) -{ - int error; - struct file *file; - struct inode *inode; - struct dentry *dentry, *root; - struct qstr this; - - if (IS_ERR(shm_mnt)) - return (void *)shm_mnt; - - error = -ENOMEM; - this.name = name; - this.len = strlen(name); - this.hash = 0; /* will go */ - root = shm_mnt->mnt_root; - dentry = d_alloc(root, &this); - if (!dentry) - goto put_memory; - - error = -ENFILE; - file = get_empty_filp(); - if (!file) - goto put_dentry; - - error = -ENOSPC; - inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); - if (!inode) - goto close_file; - - d_instantiate(dentry, inode); - inode->i_size = size; - inode->i_nlink = 0; /* It is unlinked */ - init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, - &ramfs_file_operations); - -#ifndef CONFIG_MMU - error = ramfs_nommu_expand_for_mapping(inode, size); - if (error) - goto close_file; -#endif - return file; - -close_file: - put_filp(file); -put_dentry: - dput(dentry); -put_memory: - return ERR_PTR(error); -} -EXPORT_SYMBOL_GPL(shmem_file_setup); - -/** - * shmem_zero_setup - setup a shared anonymous mapping - * @vma: the vma to be mmapped is prepared by do_mmap_pgoff - */ -int shmem_zero_setup(struct vm_area_struct *vma) -{ - struct file *file; - loff_t size = vma->vm_end - vma->vm_start; - - file = shmem_file_setup("dev/zero", size, vma->vm_flags); - if (IS_ERR(file)) - return PTR_ERR(file); - - if (vma->vm_file) - fput(vma->vm_file); - vma->vm_file = file; - vma->vm_ops = &generic_file_vm_ops; - return 0; -} - -int shmem_unuse(swp_entry_t entry, struct page *page) -{ - return 0; -} - -#ifndef CONFIG_MMU -unsigned long shmem_get_unmapped_area(struct file *file, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags) -{ - return ramfs_nommu_get_unmapped_area(file, addr, len, pgoff, flags); -} -#endif -- cgit v1.2.2 From 48aae42556e5ea1ba0d8ddab25352706577af2ed Mon Sep 17 00:00:00 2001 From: ZhenwenXu Date: Tue, 6 Jan 2009 14:40:21 -0800 Subject: mm/mmap.c: fix coding style Fix a little of the coding style in mm/mmap.c [akpm@linux-foundation.org: cleanup] Signed-off-by: ZhenwenXu Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 2c778fcfd9bd..e4507b23e620 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -413,7 +413,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, static void __vma_link_file(struct vm_area_struct *vma) { - struct file * file; + struct file *file; file = vma->vm_file; if (file) { @@ -474,11 +474,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, * insert vm structure into list and rbtree and anon_vma, * but it has already been inserted into 
prio_tree earlier. */ -static void -__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct * __vma, * prev; - struct rb_node ** rb_link, * rb_parent; + struct vm_area_struct *__vma, *prev; + struct rb_node **rb_link, *rb_parent; __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent); BUG_ON(__vma && __vma->vm_start < vma->vm_end); @@ -908,7 +907,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags, * The caller must hold down_write(current->mm->mmap_sem). */ -unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, +unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff) { @@ -1464,7 +1463,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ -struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) +struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma = NULL; @@ -1507,7 +1506,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma = NULL, *prev = NULL; - struct rb_node * rb_node; + struct rb_node *rb_node; if (!mm) goto out; @@ -1541,7 +1540,7 @@ out: * update accounting. This is shared with both the * grow-up and grow-down cases. */ -static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; -- cgit v1.2.2 From 48b47c561e41525061b5bc0cfd67d6367fd11dc4 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:40:22 -0800 Subject: mm: direct IO starvation improvement Direct IO can invalidate and sync a lot of pagecache pages in the mapping. A 4K direct IO will actually try to sync and/or invalidate the pagecache of the entire file, for example (which might be many GB or TB large). Improve this by doing range syncs. Also, memory no longer has to be unmapped to catch the dirty bits for syncing, as dirty bits would remain coherent due to dirty mmap accounting. This fixes the immediate DM deadlocks when doing direct IO reads to block device with a mounted filesystem, if only by papering over the problem somewhat rather than addressing the fsync starvation cases. 
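Concretely, callers switch from syncing the whole mapping to syncing just the byte range the IO touches (a sketch of the caller-side pattern; the real hunks follow):

	/* before: cost scales with the size of the whole file */
	retval = filemap_write_and_wait(mapping);

	/* after: only the pages this direct IO read overlaps */
	retval = filemap_write_and_wait_range(mapping, pos,
			pos + iov_length(iov, nr_segs) - 1);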
Signed-off-by: Nick Piggin Reviewed-by: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 9c5e6235cc74..f3555fb806d3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1317,7 +1317,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, goto out; /* skip atime */ size = i_size_read(inode); if (pos < size) { - retval = filemap_write_and_wait(mapping); + retval = filemap_write_and_wait_range(mapping, pos, + pos + iov_length(iov, nr_segs) - 1); if (!retval) { retval = mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs); @@ -2059,18 +2060,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, if (count != ocount) *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); - /* - * Unmap all mmappings of the file up-front. - * - * This will cause any pte dirty bits to be propagated into the - * pageframes for the subsequent filemap_write_and_wait(). - */ write_len = iov_length(iov, *nr_segs); end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; - if (mapping_mapped(mapping)) - unmap_mapping_range(mapping, pos, write_len, 0); - written = filemap_write_and_wait(mapping); + written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); if (written) goto out; @@ -2290,7 +2283,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, * the file data here, to try to honour O_DIRECT expectations. */ if (unlikely(file->f_flags & O_DIRECT) && written) - status = filemap_write_and_wait(mapping); + status = filemap_write_and_wait_range(mapping, + pos, pos + written - 1); return written ? written : status; } -- cgit v1.2.2 From 67d58ac47d25f7e2a105248a4aea6113131ab874 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:40:28 -0800 Subject: mm: pagecache gfp flags fix Frustratingly, gfp_t is really divided into two classes of flags. One are the context dependent ones (can we sleep? can we enter filesystem? block subsystem? should we use some extra reserves, etc.). The other ones are the type of memory required and depend on how the algorithm is implemented rather than the point at which the memory is allocated (highmem? dma memory? etc). Some of the functions which allocate a page and add it to page cache take a gfp_t, but sometimes those functions or their callers aren't really doing the right thing: when allocating pagecache page, the memory type should be mapping_gfp_mask(mapping). When allocating radix tree nodes, the memory type should be kernel mapped (not highmem) memory. The gfp_t argument should only really be needed for context dependent options. This patch doesn't really solve that tangle in a nice way, but it does attempt to fix a couple of bugs. - find_or_create_page changes its radix-tree allocation to only include the main context dependent flags in order so the pagecache page may be allocated from arbitrary types of memory without affecting the radix-tree. In practice, slab allocations don't come from highmem anyway, and radix-tree only uses slab allocations. So there isn't a practical change (unless some fs uses GFP_DMA for pages). - grab_cache_page_nowait() is changed to allocate radix-tree nodes with GFP_NOFS, because it is not supposed to reenter the filesystem. This bug could cause lock recursion if a filesystem is not expecting the function to reenter the fs (as-per documentation). 
Filesystems should be careful about exactly what semantics they want and what they get when fiddling with gfp_t masks to allocate pagecache. One should be as liberal as possible with the type of memory that can be used, and the same for the context-specific flags. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index f3555fb806d3..2f55a1e2baf7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -741,7 +741,14 @@ repeat: page = __page_cache_alloc(gfp_mask); if (!page) return NULL; - err = add_to_page_cache_lru(page, mapping, index, gfp_mask); + /* + * We want a regular kernel memory (not highmem or DMA etc) + * allocation for the radix tree nodes, but we need to honour + * the context-specific requirements the caller has asked for. + * GFP_RECLAIM_MASK collects those requirements. + */ + err = add_to_page_cache_lru(page, mapping, index, + (gfp_mask & GFP_RECLAIM_MASK)); if (unlikely(err)) { page_cache_release(page); page = NULL; @@ -950,7 +957,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) return NULL; } page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); - if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) { + if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { page_cache_release(page); page = NULL; } -- cgit v1.2.2 From 901608d9045146aec6f14a7777ea4b1501c379f0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 6 Jan 2009 14:40:29 -0800 Subject: mm: introduce get_mm_hiwater_xxx(), fix taskstats->hiwater_xxx accounting xacct_add_tsk() relies on do_exit()->update_hiwater_xxx() and uses mm->hiwater_xxx directly; this leads to two problems: - taskstats_user_cmd() can call fill_pid()->xacct_add_tsk() at any moment before the task exits, so we should check the current values of rss/vm anyway. - do_exit()->update_hiwater_xxx() calls are racy. An exiting thread can be preempted right before mm->hiwater_xxx = new_val, and another thread can use A_LOT of memory and exit in between. When the first thread resumes it can be the last thread in the thread group; in that case we report the wrong hiwater_xxx values which do not take A_LOT into account. Introduce get_mm_hiwater_rss() and get_mm_hiwater_vm() helpers and change xacct_add_tsk() to use them. The first helper will also be used by rusage->ru_maxrss accounting. Kill the do_exit()->update_hiwater_xxx() calls. Unless we are going to decrease rss/vm there is no point in updating mm->hiwater_xxx, and nobody can look at this mm_struct when exit_mmap() actually unmaps the memory. Signed-off-by: Oleg Nesterov Acked-by: Hugh Dickins Reviewed-by: KOSAKI Motohiro Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index e4507b23e620..1f97d8aa9b05 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2102,7 +2102,7 @@ void exit_mmap(struct mm_struct *mm) lru_add_drain(); flush_cache_mm(mm); tlb = tlb_gather_mmu(mm, 1); - /* Don't update_hiwater_rss(mm) here, do_exit already did */ + /* update_hiwater_rss(mm) here?
but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); -- cgit v1.2.2 From 9f572e3f96b8a2ef70dcb881e64c7b9c10057d98 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:40:29 -0800 Subject: mm: remove CONFIG_OUT_OF_LINE_PFN_TO_PAGE No architectures use CONFIG_OUT_OF_LINE_PFN_TO_PAGE - it can be removed. Signed-off-by: KOSAKI Motohiro Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d531e8ef9984..7bf22e045318 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4587,19 +4587,6 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } -#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE -struct page *pfn_to_page(unsigned long pfn) -{ - return __pfn_to_page(pfn); -} -unsigned long page_to_pfn(struct page *page) -{ - return __page_to_pfn(page); -} -EXPORT_SYMBOL(pfn_to_page); -EXPORT_SYMBOL(page_to_pfn); -#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ - /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, unsigned long pfn) -- cgit v1.2.2 From 084f71ae5ceeb16734d8ac47559d3c718456a865 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:40:30 -0800 Subject: mm: kill page_queue_congested() page_queue_congested() was introduced in 2002, but it was never used. Signed-off-by: KOSAKI Motohiro Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index f28745855772..eec5ca758a23 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1372,26 +1372,6 @@ out: return ret; } -#if 0 /* We don't need this yet */ -#include -int page_queue_congested(struct page *page) -{ - struct backing_dev_info *bdi; - - VM_BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ - - if (PageSwapCache(page)) { - swp_entry_t entry = { .val = page_private(page) }; - struct swap_info_struct *sis; - - sis = get_swap_info_struct(swp_type(entry)); - bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info; - } else - bdi = page->mapping->backing_dev_info; - return bdi_write_congested(bdi); -} -#endif - asmlinkage long sys_swapoff(const char __user * specialfile) { struct swap_info_struct * p = NULL; -- cgit v1.2.2 From dcd4a049b9751828c516c59709f3fdf50436df85 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 6 Jan 2009 14:40:31 -0800 Subject: mm: check for no mmaps in exit_mmap() When dup_mmap() OOMs, we can end up with mm->mmap == NULL. The error path then does mmput(), and unmap_vmas() gets a NULL vma which it dereferences. In exit_mmap() there is nothing to do at all for this case; we can cancel the call path right there.
[akpm@linux-foundation.org: add sorely-needed comment] Signed-off-by: Johannes Weiner Reported-by: Akinobu Mita Cc: Nick Piggin Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 1f97d8aa9b05..a910c045cfd4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2090,6 +2090,9 @@ void exit_mmap(struct mm_struct *mm) arch_exit_mmap(mm); mmu_notifier_release(mm); + if (!mm->mmap) /* Can happen if dup_mmap() received an OOM */ + return; + if (mm->locked_vm) { vma = mm->mmap; while (vma) { -- cgit v1.2.2 From 594fe1a044325bb0a1a49ca7d086e3df4f1df59a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 6 Jan 2009 14:40:32 -0800 Subject: bootmem: print request details before BUG_ON(them) Moving the request details print-out before the sanity checks that might panic() enables us to analyse invalid requests without having access to the line information of the stack dump. Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/bootmem.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index ac5a891f142a..51a0ccf61e0e 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -435,6 +435,10 @@ static void * __init alloc_bootmem_core(struct bootmem_data *bdata, unsigned long fallback = 0; unsigned long min, max, start, sidx, midx, step; + bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n", + bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT, + align, goal, limit); + BUG_ON(!size); BUG_ON(align & (align - 1)); BUG_ON(limit && goal + size > limit); @@ -442,10 +446,6 @@ static void * __init alloc_bootmem_core(struct bootmem_data *bdata, if (!bdata->node_bootmem_map) return NULL; - bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n", - bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT, - align, goal, limit); - min = bdata->node_min_pfn; max = bdata->node_low_pfn; -- cgit v1.2.2 From 73ce02e96fe34a983199a9855b2ae738f960a6ee Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 6 Jan 2009 14:40:33 -0800 Subject: mm: stop kswapd's infinite loop at high order allocation Wassim Dagash reported the following kswapd infinite loop problem: kswapd runs in some infinite loop trying to swap until order 10 of zone highmem is OK.... kswapd will continue to try to balance order 10 of zone highmem forever (or until someone releases a very large chunk of highmem). For non order-0 allocations, the system may never be balanced due to fragmentation, but kswapd should not loop infinitely as a result. Instead, recheck all watermarks at order-0 as they are the most important. If watermarks are ok, kswapd will go back to sleep. [akpm@linux-foundation.org: fix comment] Reported-by: wassim dagash Signed-off-by: KOSAKI Motohiro Reviewed-by: Nick Piggin Signed-off-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5daf606e0a35..b07c48b09a93 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1867,6 +1867,23 @@ out: try_to_freeze(); + /* + * Fragmentation may mean that the system cannot be + * rebalanced for high-order allocations in all zones. + * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, + * it means the zones have been fully scanned and are still + * not balanced.
For high-order allocations, there is + little point trying all over again as kswapd may + infinite loop. + * + * Instead, recheck all watermarks at order-0 as they + * are the most important. If watermarks are ok, kswapd will go + * back to sleep. High-order users can still perform direct + * reclaim if they wish. + */ + if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) + order = sc.order = 0; + goto loop_again; } -- cgit v1.2.2 From 91f47662dfaa5b459aebe13284c6c38db27350dc Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Tue, 6 Jan 2009 14:40:33 -0800 Subject: mm: hugetlb: remove redundant `if' operation At this point we already know that 'addr' is not NULL, so get rid of the redundant 'if'. gcc probably eliminates it in an optimization pass anyway. [akpm@linux-foundation.org: use __weak, too] Signed-off-by: Cyrill Gorcunov Reviewed-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 82321da23cc3..618e98304080 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1005,7 +1005,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return page; } -__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h) +int __weak alloc_bootmem_huge_page(struct hstate *h) { struct huge_bootmem_page *m; int nr_nodes = nodes_weight(node_online_map); @@ -1024,8 +1024,7 @@ __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h) * puts them into the mem_map). */ m = addr; - if (m) - goto found; + goto found; } hstate_next_node(h); nr_nodes--; -- cgit v1.2.2 From 67faaada1ebcccf29745346f1d7cb5392f46500a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 6 Jan 2009 14:41:12 -0800 Subject: Remove obsolete CONFIG_RESOURCES_64BIT commit 8308c54d7e312f7a03e2ce2057d0837e6fe3843f ("generic: redefine resource_size_t as phys_addr_t") made CONFIG_RESOURCES_64BIT obsolete, but didn't remove it. Remove it. Signed-off-by: Geert Uytterhoeven Cc: Jeremy Fitzhardinge Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Kconfig | 6 ------ 1 file changed, 6 deletions(-) (limited to 'mm') diff --git a/mm/Kconfig b/mm/Kconfig index 5b5790f8a816..a5b77811fdf2 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -181,12 +181,6 @@ config MIGRATION example on NUMA systems to put pages nearer to the processors accessing the page. -config RESOURCES_64BIT - bool "64 bit Memory and IO resources (EXPERIMENTAL)" if (!64BIT && EXPERIMENTAL) - default 64BIT - help - This option allows memory and IO resources to be 64 bit. - config PHYS_ADDR_T_64BIT def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT -- cgit v1.2.2 From 41836382ebb415d68d3ebc4525e78e871fe58baf Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 8 Jan 2009 12:04:47 +0000 Subject: NOMMU: Delete askedalloc and realalloc variables Delete the askedalloc and realalloc variables, as nothing actually uses the values calculated.
Signed-off-by: David Howells Tested-by: Mike Frysinger Acked-by: Paul Mundt --- mm/nommu.c | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 1c28ea3a4e9c..23f355bbe262 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -40,7 +40,6 @@ void *high_memory; struct page *mem_map; unsigned long max_mapnr; unsigned long num_physpages; -unsigned long askedalloc, realalloc; atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ @@ -1042,22 +1041,11 @@ unsigned long do_mmap_pgoff(struct file *file, /* okay... we have a mapping; now we have to register it */ result = (void *) vma->vm_start; - if (vma->vm_flags & VM_MAPPED_COPY) { - realalloc += kobjsize(result); - askedalloc += len; - } - - realalloc += kobjsize(vma); - askedalloc += sizeof(*vma); - current->mm->total_vm += len >> PAGE_SHIFT; add_nommu_vma(vma); shared: - realalloc += kobjsize(vml); - askedalloc += sizeof(*vml); - add_vma_to_mm(current->mm, vml); up_write(&nommu_vma_sem); @@ -1124,14 +1112,8 @@ static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma) /* IO memory and memory shared directly out of the pagecache from * ramfs/tmpfs mustn't be released here */ - if (vma->vm_flags & VM_MAPPED_COPY) { - realalloc -= kobjsize((void *) vma->vm_start); - askedalloc -= vma->vm_end - vma->vm_start; + if (vma->vm_flags & VM_MAPPED_COPY) kfree((void *) vma->vm_start); - } - - realalloc -= kobjsize(vma); - askedalloc -= sizeof(*vma); if (vma->vm_file) { fput(vma->vm_file); @@ -1177,8 +1159,6 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) put_vma(mm, vml->vma); *parent = vml->next; - realalloc -= kobjsize(vml); - askedalloc -= sizeof(*vml); kfree(vml); update_hiwater_vm(mm); @@ -1220,9 +1200,6 @@ void exit_mmap(struct mm_struct * mm) while ((tmp = mm->context.vmlist)) { mm->context.vmlist = tmp->next; put_vma(mm, tmp->vma); - - realalloc -= kobjsize(tmp); - askedalloc -= sizeof(*tmp); kfree(tmp); } @@ -1276,9 +1253,6 @@ unsigned long do_mremap(unsigned long addr, /* all checks complete - do it */ vma->vm_end = vma->vm_start + new_len; - askedalloc -= old_len; - askedalloc += new_len; - return vma->vm_start; } EXPORT_SYMBOL(do_mremap); -- cgit v1.2.2 From 8feae13110d60cc6287afabc2887366b0eb226c2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 8 Jan 2009 12:04:47 +0000 Subject: NOMMU: Make VMAs per MM as for MMU-mode linux Make VMAs per mm_struct as for MMU-mode Linux. This solves two problems: (1) In SYSV SHM where nattch for a segment does not reflect the number of shmat's (and forks) done. (2) In mmap() where the VMA's vm_mm is set to point to the parent mm by an exec'ing process when VM_EXECUTABLE is specified, regardless of the fact that a VMA might be shared and already have its vm_mm assigned to another process or a dead process. A new struct (vm_region) is introduced to track a mapped region and to remember the circumstances under which it may be shared, and the vm_list_struct structure is discarded as it is no longer required. This patch makes the following additional changes: (1) Regions are now allocated with alloc_pages() rather than kmalloc() and with no recourse to __GFP_COMP, so the pages are not composite. Instead, each page has a reference on it held by the region. Anything else that is interested in such a page will have to get a reference on it to retain it.
When the pages are released due to unmapping, each page is passed to put_page() and will be freed when the page usage count reaches zero. (2) Excess pages are trimmed after an allocation as the allocation must be made as a power-of-2 quantity of pages. (3) VMAs are added to the parent MM's R/B tree and mmap lists. As an MM may end up with overlapping VMAs within the tree, the VMA struct address is appended to the sort key. (4) Non-anonymous VMAs are now added to the backing inode's prio list. (5) Holes may be punched in anonymous VMAs with munmap(), releasing parts of the backing region. The VMA and region structs will be split if necessary. (6) sys_shmdt() only releases one attachment to a SYSV IPC shared memory segment instead of all the attachments at that address. Multiple shmat()'s return the same address under NOMMU-mode instead of different virtual addresses as under MMU-mode. (7) Core dumping for ELF-FDPIC requires fewer exceptions for NOMMU-mode. (8) /proc/maps is now the global list of mapped regions, and may list bits that aren't actually mapped anywhere. (9) /proc/meminfo gains a line (tagged "MmapCopy") that indicates the amount of RAM currently allocated by mmap to hold mappable regions that can't be mapped directly. These are copies of the backing device or file if not anonymous. These changes make NOMMU mode more similar to MMU mode. The downside is that NOMMU mode now requires somewhat more memory to track things than it did before this patch (VMAs are no longer shared, and there are now region structs). Signed-off-by: David Howells Tested-by: Mike Frysinger Acked-by: Paul Mundt --- mm/mmap.c | 10 + mm/nommu.c | 960 ++++++++++++++++++++++++++++++++++++++++++------------------- 2 files changed, 674 insertions(+), 296 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index a910c045cfd4..749623196cb9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2472,3 +2472,13 @@ void mm_drop_all_locks(struct mm_struct *mm) mutex_unlock(&mm_all_locks_mutex); } + +/* + * initialise the VMA slab + */ +void __init mmap_init(void) +{ + vm_area_cachep = kmem_cache_create("vm_area_struct", + sizeof(struct vm_area_struct), 0, + SLAB_PANIC, NULL); +} diff --git a/mm/nommu.c b/mm/nommu.c index 23f355bbe262..0d363dfcf10e 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -6,7 +6,7 @@ * * See Documentation/nommu-mmap.txt * - * Copyright (c) 2004-2005 David Howells + * Copyright (c) 2004-2008 David Howells * Copyright (c) 2000-2003 David McCullough * Copyright (c) 2000-2001 D Jeff Dionne * Copyright (c) 2002 Greg Ungerer @@ -33,6 +33,28 @@ #include #include #include +#include "internal.h" + +static inline __attribute__((format(printf, 1, 2))) +void no_printk(const char *fmt, ...) +{ +} + +#if 0 +#define kenter(FMT, ...) \ + printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) +#define kleave(FMT, ...) \ + printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) +#define kdebug(FMT, ...) \ + printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) +#else +#define kenter(FMT, ...) \ + no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) +#define kleave(FMT, ...) \ + no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) +#define kdebug(FMT, ...)
\ + no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) +#endif #include "internal.h" @@ -46,12 +68,15 @@ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; int heap_stack_gap = 0; +atomic_t mmap_pages_allocated; + EXPORT_SYMBOL(mem_map); EXPORT_SYMBOL(num_physpages); -/* list of shareable VMAs */ -struct rb_root nommu_vma_tree = RB_ROOT; -DECLARE_RWSEM(nommu_vma_sem); +/* list of mapped, potentially shareable regions */ +static struct kmem_cache *vm_region_jar; +struct rb_root nommu_region_tree = RB_ROOT; +DECLARE_RWSEM(nommu_region_sem); struct vm_operations_struct generic_file_vm_ops = { }; @@ -400,129 +425,174 @@ asmlinkage unsigned long sys_brk(unsigned long brk) return mm->brk = brk; } -#ifdef DEBUG -static void show_process_blocks(void) +/* + * initialise the VMA and region record slabs + */ +void __init mmap_init(void) { - struct vm_list_struct *vml; - - printk("Process blocks %d:", current->pid); - - for (vml = ¤t->mm->context.vmlist; vml; vml = vml->next) { - printk(" %p: %p", vml, vml->vma); - if (vml->vma) - printk(" (%d @%lx #%d)", - kobjsize((void *) vml->vma->vm_start), - vml->vma->vm_start, - atomic_read(&vml->vma->vm_usage)); - printk(vml->next ? " ->" : ".\n"); - } + vm_region_jar = kmem_cache_create("vm_region_jar", + sizeof(struct vm_region), 0, + SLAB_PANIC, NULL); + vm_area_cachep = kmem_cache_create("vm_area_struct", + sizeof(struct vm_area_struct), 0, + SLAB_PANIC, NULL); } -#endif /* DEBUG */ /* - * add a VMA into a process's mm_struct in the appropriate place in the list - * - should be called with mm->mmap_sem held writelocked + * validate the region tree + * - the caller must hold the region lock */ -static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml) +#ifdef CONFIG_DEBUG_NOMMU_REGIONS +static noinline void validate_nommu_regions(void) { - struct vm_list_struct **ppv; + struct vm_region *region, *last; + struct rb_node *p, *lastp; - for (ppv = ¤t->mm->context.vmlist; *ppv; ppv = &(*ppv)->next) - if ((*ppv)->vma->vm_start > vml->vma->vm_start) - break; + lastp = rb_first(&nommu_region_tree); + if (!lastp) + return; + + last = rb_entry(lastp, struct vm_region, vm_rb); + if (unlikely(last->vm_end <= last->vm_start)) + BUG(); + + while ((p = rb_next(lastp))) { + region = rb_entry(p, struct vm_region, vm_rb); + last = rb_entry(lastp, struct vm_region, vm_rb); + + if (unlikely(region->vm_end <= region->vm_start)) + BUG(); + if (unlikely(region->vm_start < last->vm_end)) + BUG(); - vml->next = *ppv; - *ppv = vml; + lastp = p; + } } +#else +#define validate_nommu_regions() do {} while(0) +#endif /* - * look up the first VMA in which addr resides, NULL if none - * - should be called with mm->mmap_sem at least held readlocked + * add a region into the global tree */ -struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +static void add_nommu_region(struct vm_region *region) { - struct vm_list_struct *loop, *vml; + struct vm_region *pregion; + struct rb_node **p, *parent; - /* search the vm_start ordered list */ - vml = NULL; - for (loop = mm->context.vmlist; loop; loop = loop->next) { - if (loop->vma->vm_start > addr) - break; - vml = loop; + validate_nommu_regions(); + + BUG_ON(region->vm_start & ~PAGE_MASK); + + parent = NULL; + p = &nommu_region_tree.rb_node; + while (*p) { + parent = *p; + pregion = rb_entry(parent, struct vm_region, vm_rb); + if (region->vm_start < pregion->vm_start) + p = &(*p)->rb_left; + else if (region->vm_start > pregion->vm_start) + p = &(*p)->rb_right; 
+ else if (pregion == region) + return; + else + BUG(); } - if (vml && vml->vma->vm_end > addr) - return vml->vma; + rb_link_node(®ion->vm_rb, parent, p); + rb_insert_color(®ion->vm_rb, &nommu_region_tree); - return NULL; + validate_nommu_regions(); } -EXPORT_SYMBOL(find_vma); /* - * find a VMA - * - we don't extend stack VMAs under NOMMU conditions + * delete a region from the global tree */ -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +static void delete_nommu_region(struct vm_region *region) { - return find_vma(mm, addr); -} + BUG_ON(!nommu_region_tree.rb_node); -int expand_stack(struct vm_area_struct *vma, unsigned long address) -{ - return -ENOMEM; + validate_nommu_regions(); + rb_erase(®ion->vm_rb, &nommu_region_tree); + validate_nommu_regions(); } /* - * look up the first VMA exactly that exactly matches addr - * - should be called with mm->mmap_sem at least held readlocked + * free a contiguous series of pages */ -static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm, - unsigned long addr) +static void free_page_series(unsigned long from, unsigned long to) { - struct vm_list_struct *vml; - - /* search the vm_start ordered list */ - for (vml = mm->context.vmlist; vml; vml = vml->next) { - if (vml->vma->vm_start == addr) - return vml->vma; - if (vml->vma->vm_start > addr) - break; + for (; from < to; from += PAGE_SIZE) { + struct page *page = virt_to_page(from); + + kdebug("- free %lx", from); + atomic_dec(&mmap_pages_allocated); + if (page_count(page) != 1) + kdebug("free page %p [%d]", page, page_count(page)); + put_page(page); } - - return NULL; } /* - * find a VMA in the global tree + * release a reference to a region + * - the caller must hold the region semaphore, which this releases + * - the region may not have been added to the tree yet, in which case vm_end + * will equal vm_start */ -static inline struct vm_area_struct *find_nommu_vma(unsigned long start) +static void __put_nommu_region(struct vm_region *region) + __releases(nommu_region_sem) { - struct vm_area_struct *vma; - struct rb_node *n = nommu_vma_tree.rb_node; + kenter("%p{%d}", region, atomic_read(®ion->vm_usage)); - while (n) { - vma = rb_entry(n, struct vm_area_struct, vm_rb); + BUG_ON(!nommu_region_tree.rb_node); - if (start < vma->vm_start) - n = n->rb_left; - else if (start > vma->vm_start) - n = n->rb_right; - else - return vma; + if (atomic_dec_and_test(®ion->vm_usage)) { + if (region->vm_end > region->vm_start) + delete_nommu_region(region); + up_write(&nommu_region_sem); + + if (region->vm_file) + fput(region->vm_file); + + /* IO memory and memory shared directly out of the pagecache + * from ramfs/tmpfs mustn't be released here */ + if (region->vm_flags & VM_MAPPED_COPY) { + kdebug("free series"); + free_page_series(region->vm_start, region->vm_end); + } + kmem_cache_free(vm_region_jar, region); + } else { + up_write(&nommu_region_sem); } +} - return NULL; +/* + * release a reference to a region + */ +static void put_nommu_region(struct vm_region *region) +{ + down_write(&nommu_region_sem); + __put_nommu_region(region); } /* - * add a VMA in the global tree + * add a VMA into a process's mm_struct in the appropriate place in the list + * and tree and add to the address space's page tree also if not an anonymous + * page + * - should be called with mm->mmap_sem held writelocked */ -static void add_nommu_vma(struct vm_area_struct *vma) +static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *pvma; + 
struct vm_area_struct *pvma, **pp; struct address_space *mapping; - struct rb_node **p = &nommu_vma_tree.rb_node; - struct rb_node *parent = NULL; + struct rb_node **p, *parent; + + kenter(",%p", vma); + + BUG_ON(!vma->vm_region); + + mm->map_count++; + vma->vm_mm = mm; /* add the VMA to the mapping */ if (vma->vm_file) { @@ -533,42 +603,62 @@ static void add_nommu_vma(struct vm_area_struct *vma) flush_dcache_mmap_unlock(mapping); } - /* add the VMA to the master list */ + /* add the VMA to the tree */ + parent = NULL; + p = &mm->mm_rb.rb_node; while (*p) { parent = *p; pvma = rb_entry(parent, struct vm_area_struct, vm_rb); - if (vma->vm_start < pvma->vm_start) { + /* sort by: start addr, end addr, VMA struct addr in that order + * (the latter is necessary as we may get identical VMAs) */ + if (vma->vm_start < pvma->vm_start) p = &(*p)->rb_left; - } - else if (vma->vm_start > pvma->vm_start) { + else if (vma->vm_start > pvma->vm_start) p = &(*p)->rb_right; - } - else { - /* mappings are at the same address - this can only - * happen for shared-mem chardevs and shared file - * mappings backed by ramfs/tmpfs */ - BUG_ON(!(pvma->vm_flags & VM_SHARED)); - - if (vma < pvma) - p = &(*p)->rb_left; - else if (vma > pvma) - p = &(*p)->rb_right; - else - BUG(); - } + else if (vma->vm_end < pvma->vm_end) + p = &(*p)->rb_left; + else if (vma->vm_end > pvma->vm_end) + p = &(*p)->rb_right; + else if (vma < pvma) + p = &(*p)->rb_left; + else if (vma > pvma) + p = &(*p)->rb_right; + else + BUG(); } rb_link_node(&vma->vm_rb, parent, p); - rb_insert_color(&vma->vm_rb, &nommu_vma_tree); + rb_insert_color(&vma->vm_rb, &mm->mm_rb); + + /* add VMA to the VMA list also */ + for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) { + if (pvma->vm_start > vma->vm_start) + break; + if (pvma->vm_start < vma->vm_start) + continue; + if (pvma->vm_end < vma->vm_end) + break; + } + + vma->vm_next = *pp; + *pp = vma; } /* - * delete a VMA from the global list + * delete a VMA from its owning mm_struct and address space */ -static void delete_nommu_vma(struct vm_area_struct *vma) +static void delete_vma_from_mm(struct vm_area_struct *vma) { + struct vm_area_struct **pp; struct address_space *mapping; + struct mm_struct *mm = vma->vm_mm; + + kenter("%p", vma); + + mm->map_count--; + if (mm->mmap_cache == vma) + mm->mmap_cache = NULL; /* remove the VMA from the mapping */ if (vma->vm_file) { @@ -579,8 +669,115 @@ static void delete_nommu_vma(struct vm_area_struct *vma) flush_dcache_mmap_unlock(mapping); } - /* remove from the master list */ - rb_erase(&vma->vm_rb, &nommu_vma_tree); + /* remove from the MM's tree and list */ + rb_erase(&vma->vm_rb, &mm->mm_rb); + for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) { + if (*pp == vma) { + *pp = vma->vm_next; + break; + } + } + + vma->vm_mm = NULL; +} + +/* + * destroy a VMA record + */ +static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) +{ + kenter("%p", vma); + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); + if (vma->vm_file) { + fput(vma->vm_file); + if (vma->vm_flags & VM_EXECUTABLE) + removed_exe_file_vma(mm); + } + put_nommu_region(vma->vm_region); + kmem_cache_free(vm_area_cachep, vma); +} + +/* + * look up the first VMA in which addr resides, NULL if none + * - should be called with mm->mmap_sem at least held readlocked + */ +struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma; + struct rb_node *n = mm->mm_rb.rb_node; + + /* check the cache first */ + vma = mm->mmap_cache; + if 
(vma && vma->vm_start <= addr && vma->vm_end > addr) + return vma; + + /* trawl the tree (there may be multiple mappings in which addr + * resides) */ + for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { + vma = rb_entry(n, struct vm_area_struct, vm_rb); + if (vma->vm_start > addr) + return NULL; + if (vma->vm_end > addr) { + mm->mmap_cache = vma; + return vma; + } + } + + return NULL; +} +EXPORT_SYMBOL(find_vma); + +/* + * find a VMA + * - we don't extend stack VMAs under NOMMU conditions + */ +struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +{ + return find_vma(mm, addr); +} + +/* + * expand a stack to a given address + * - not supported under NOMMU conditions + */ +int expand_stack(struct vm_area_struct *vma, unsigned long address) +{ + return -ENOMEM; +} + +/* + * look up the first VMA exactly that exactly matches addr + * - should be called with mm->mmap_sem at least held readlocked + */ +static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + struct vm_area_struct *vma; + struct rb_node *n = mm->mm_rb.rb_node; + unsigned long end = addr + len; + + /* check the cache first */ + vma = mm->mmap_cache; + if (vma && vma->vm_start == addr && vma->vm_end == end) + return vma; + + /* trawl the tree (there may be multiple mappings in which addr + * resides) */ + for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { + vma = rb_entry(n, struct vm_area_struct, vm_rb); + if (vma->vm_start < addr) + continue; + if (vma->vm_start > addr) + return NULL; + if (vma->vm_end == end) { + mm->mmap_cache = vma; + return vma; + } + } + + return NULL; } /* @@ -595,7 +792,7 @@ static int validate_mmap_request(struct file *file, unsigned long pgoff, unsigned long *_capabilities) { - unsigned long capabilities; + unsigned long capabilities, rlen; unsigned long reqprot = prot; int ret; @@ -615,12 +812,12 @@ static int validate_mmap_request(struct file *file, return -EINVAL; /* Careful about overflows.. */ - len = PAGE_ALIGN(len); - if (!len || len > TASK_SIZE) + rlen = PAGE_ALIGN(len); + if (!rlen || rlen > TASK_SIZE) return -ENOMEM; /* offset overflow? 
*/ - if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) + if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; if (file) { @@ -794,9 +991,10 @@ static unsigned long determine_vm_flags(struct file *file, } /* - * set up a shared mapping on a file + * set up a shared mapping on a file (the driver or filesystem provides and + * pins the storage) */ -static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len) +static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; @@ -814,10 +1012,14 @@ static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len) /* * set up a private mapping or an anonymous shared mapping */ -static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) +static int do_mmap_private(struct vm_area_struct *vma, + struct vm_region *region, + unsigned long len) { + struct page *pages; + unsigned long total, point, n, rlen; void *base; - int ret; + int ret, order; /* invoke the file's mapping function so that it can keep track of * shared mappings on devices or memory @@ -836,23 +1038,46 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) * make a private copy of the data and map that instead */ } + rlen = PAGE_ALIGN(len); + /* allocate some memory to hold the mapping * - note that this may not return a page-aligned address if the object * we're allocating is smaller than a page */ - base = kmalloc(len, GFP_KERNEL|__GFP_COMP); - if (!base) + order = get_order(rlen); + kdebug("alloc order %d for %lx", order, len); + + pages = alloc_pages(GFP_KERNEL, order); + if (!pages) goto enomem; - vma->vm_start = (unsigned long) base; - vma->vm_end = vma->vm_start + len; - vma->vm_flags |= VM_MAPPED_COPY; + /* we allocated a power-of-2 sized page set, so we need to trim off the + * excess */ + total = 1 << order; + atomic_add(total, &mmap_pages_allocated); + + point = rlen >> PAGE_SHIFT; + while (total > point) { + order = ilog2(total - point); + n = 1 << order; + kdebug("shave %lu/%lu @%lu", n, total - point, total); + atomic_sub(n, &mmap_pages_allocated); + total -= n; + set_page_refcounted(pages + total); + __free_pages(pages + total, order); + } + + total = rlen >> PAGE_SHIFT; + for (point = 1; point < total; point++) + set_page_refcounted(&pages[point]); -#ifdef WARN_ON_SLACK - if (len + WARN_ON_SLACK <= kobjsize(result)) - printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", - len, current->pid, kobjsize(result) - len); -#endif + base = page_address(pages); + region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; + region->vm_start = (unsigned long) base; + region->vm_end = region->vm_start + rlen; + + vma->vm_start = region->vm_start; + vma->vm_end = region->vm_start + len; if (vma->vm_file) { /* read the contents of a file into the copy */ @@ -864,26 +1089,27 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) old_fs = get_fs(); set_fs(KERNEL_DS); - ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos); + ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); set_fs(old_fs); if (ret < 0) goto error_free; /* clear the last little bit */ - if (ret < len) - memset(base + ret, 0, len - ret); + if (ret < rlen) + memset(base + ret, 0, rlen - ret); } else { /* if it's an anonymous mapping, then just clear it */ - memset(base, 0, len); + memset(base, 0, rlen); } return 0; error_free: - kfree(base); - vma->vm_start = 0; + free_page_series(region->vm_start, region->vm_end); + region->vm_start = vma->vm_start = 0; + region->vm_end = vma->vm_end = 
0; return ret; enomem: @@ -903,13 +1129,14 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long flags, unsigned long pgoff) { - struct vm_list_struct *vml = NULL; - struct vm_area_struct *vma = NULL; + struct vm_area_struct *vma; + struct vm_region *region; struct rb_node *rb; - unsigned long capabilities, vm_flags; - void *result; + unsigned long capabilities, vm_flags, result; int ret; + kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); + if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); @@ -917,73 +1144,120 @@ unsigned long do_mmap_pgoff(struct file *file, * mapping */ ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, &capabilities); - if (ret < 0) + if (ret < 0) { + kleave(" = %d [val]", ret); return ret; + } /* we've determined that we can make the mapping, now translate what we * now know into VMA flags */ vm_flags = determine_vm_flags(file, prot, flags, capabilities); - /* we're going to need to record the mapping if it works */ - vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL); - if (!vml) - goto error_getting_vml; + /* we're going to need to record the mapping */ + region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); + if (!region) + goto error_getting_region; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma) + goto error_getting_vma; + + atomic_set(®ion->vm_usage, 1); + region->vm_flags = vm_flags; + region->vm_pgoff = pgoff; - down_write(&nommu_vma_sem); + INIT_LIST_HEAD(&vma->anon_vma_node); + vma->vm_flags = vm_flags; + vma->vm_pgoff = pgoff; - /* if we want to share, we need to check for VMAs created by other + if (file) { + region->vm_file = file; + get_file(file); + vma->vm_file = file; + get_file(file); + if (vm_flags & VM_EXECUTABLE) { + added_exe_file_vma(current->mm); + vma->vm_mm = current->mm; + } + } + + down_write(&nommu_region_sem); + + /* if we want to share, we need to check for regions created by other * mmap() calls that overlap with our proposed mapping - * - we can only share with an exact match on most regular files + * - we can only share with a superset match on most regular files * - shared mappings on character devices and memory backed files are * permitted to overlap inexactly as far as we are concerned for in * these cases, sharing is handled in the driver or filesystem rather * than here */ if (vm_flags & VM_MAYSHARE) { - unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long vmpglen; + struct vm_region *pregion; + unsigned long pglen, rpglen, pgend, rpgend, start; - /* suppress VMA sharing for shared regions */ - if (vm_flags & VM_SHARED && - capabilities & BDI_CAP_MAP_DIRECT) - goto dont_share_VMAs; + pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; + pgend = pgoff + pglen; - for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) { - vma = rb_entry(rb, struct vm_area_struct, vm_rb); + for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { + pregion = rb_entry(rb, struct vm_region, vm_rb); - if (!(vma->vm_flags & VM_MAYSHARE)) + if (!(pregion->vm_flags & VM_MAYSHARE)) continue; /* search for overlapping mappings on the same file */ - if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode) + if (pregion->vm_file->f_path.dentry->d_inode != + file->f_path.dentry->d_inode) continue; - if (vma->vm_pgoff >= pgoff + pglen) + if (pregion->vm_pgoff >= pgend) continue; - vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1; - vmpglen >>= PAGE_SHIFT; - if (pgoff >= vma->vm_pgoff + vmpglen) + rpglen = pregion->vm_end - pregion->vm_start; 
+ rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; + rpgend = pregion->vm_pgoff + rpglen; + if (pgoff >= rpgend) continue; - /* handle inexactly overlapping matches between mappings */ - if (vma->vm_pgoff != pgoff || vmpglen != pglen) { + /* handle inexactly overlapping matches between + * mappings */ + if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && + !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { + /* new mapping is not a subset of the region */ if (!(capabilities & BDI_CAP_MAP_DIRECT)) goto sharing_violation; continue; } - /* we've found a VMA we can share */ - atomic_inc(&vma->vm_usage); - - vml->vma = vma; - result = (void *) vma->vm_start; - goto shared; + /* we've found a region we can share */ + atomic_inc(&pregion->vm_usage); + vma->vm_region = pregion; + start = pregion->vm_start; + start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; + vma->vm_start = start; + vma->vm_end = start + len; + + if (pregion->vm_flags & VM_MAPPED_COPY) { + kdebug("share copy"); + vma->vm_flags |= VM_MAPPED_COPY; + } else { + kdebug("share mmap"); + ret = do_mmap_shared_file(vma); + if (ret < 0) { + vma->vm_region = NULL; + vma->vm_start = 0; + vma->vm_end = 0; + atomic_dec(&pregion->vm_usage); + pregion = NULL; + goto error_just_free; + } + } + fput(region->vm_file); + kmem_cache_free(vm_region_jar, region); + region = pregion; + result = start; + goto share; } - dont_share_VMAs: - vma = NULL; - /* obtain the address at which to make a shared mapping * - this is the hook for quasi-memory character devices to * tell us the location of a shared mapping @@ -994,102 +1268,93 @@ unsigned long do_mmap_pgoff(struct file *file, if (IS_ERR((void *) addr)) { ret = addr; if (ret != (unsigned long) -ENOSYS) - goto error; + goto error_just_free; /* the driver refused to tell us where to site * the mapping so we'll have to attempt to copy * it */ ret = (unsigned long) -ENODEV; if (!(capabilities & BDI_CAP_MAP_COPY)) - goto error; + goto error_just_free; capabilities &= ~BDI_CAP_MAP_DIRECT; + } else { + vma->vm_start = region->vm_start = addr; + vma->vm_end = region->vm_end = addr + len; } } } - /* we're going to need a VMA struct as well */ - vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL); - if (!vma) - goto error_getting_vma; - - INIT_LIST_HEAD(&vma->anon_vma_node); - atomic_set(&vma->vm_usage, 1); - if (file) { - get_file(file); - if (vm_flags & VM_EXECUTABLE) { - added_exe_file_vma(current->mm); - vma->vm_mm = current->mm; - } - } - vma->vm_file = file; - vma->vm_flags = vm_flags; - vma->vm_start = addr; - vma->vm_end = addr + len; - vma->vm_pgoff = pgoff; - - vml->vma = vma; + vma->vm_region = region; /* set up the mapping */ if (file && vma->vm_flags & VM_SHARED) - ret = do_mmap_shared_file(vma, len); + ret = do_mmap_shared_file(vma); else - ret = do_mmap_private(vma, len); + ret = do_mmap_private(vma, region, len); if (ret < 0) - goto error; + goto error_put_region; + + add_nommu_region(region); /* okay... 
we have a mapping; now we have to register it */ - result = (void *) vma->vm_start; + result = vma->vm_start; current->mm->total_vm += len >> PAGE_SHIFT; - add_nommu_vma(vma); +share: + add_vma_to_mm(current->mm, vma); - shared: - add_vma_to_mm(current->mm, vml); - - up_write(&nommu_vma_sem); + up_write(&nommu_region_sem); if (prot & PROT_EXEC) - flush_icache_range((unsigned long) result, - (unsigned long) result + len); + flush_icache_range(result, result + len); -#ifdef DEBUG - printk("do_mmap:\n"); - show_process_blocks(); -#endif + kleave(" = %lx", result); + return result; - return (unsigned long) result; - - error: - up_write(&nommu_vma_sem); - kfree(vml); +error_put_region: + __put_nommu_region(region); if (vma) { if (vma->vm_file) { fput(vma->vm_file); if (vma->vm_flags & VM_EXECUTABLE) removed_exe_file_vma(vma->vm_mm); } - kfree(vma); + kmem_cache_free(vm_area_cachep, vma); } + kleave(" = %d [pr]", ret); return ret; - sharing_violation: - up_write(&nommu_vma_sem); - printk("Attempt to share mismatched mappings\n"); - kfree(vml); - return -EINVAL; +error_just_free: + up_write(&nommu_region_sem); +error: + fput(region->vm_file); + kmem_cache_free(vm_region_jar, region); + fput(vma->vm_file); + if (vma->vm_flags & VM_EXECUTABLE) + removed_exe_file_vma(vma->vm_mm); + kmem_cache_free(vm_area_cachep, vma); + kleave(" = %d", ret); + return ret; + +sharing_violation: + up_write(&nommu_region_sem); + printk(KERN_WARNING "Attempt to share mismatched mappings\n"); + ret = -EINVAL; + goto error; - error_getting_vma: - up_write(&nommu_vma_sem); - kfree(vml); - printk("Allocation of vma for %lu byte allocation from process %d failed\n", +error_getting_vma: + kmem_cache_free(vm_region_jar, region); + printk(KERN_WARNING "Allocation of vma for %lu byte allocation" + " from process %d failed\n", len, current->pid); show_free_areas(); return -ENOMEM; - error_getting_vml: - printk("Allocation of vml for %lu byte allocation from process %d failed\n", +error_getting_region: + printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" + " from process %d failed\n", len, current->pid); show_free_areas(); return -ENOMEM; @@ -1097,77 +1362,180 @@ unsigned long do_mmap_pgoff(struct file *file, EXPORT_SYMBOL(do_mmap_pgoff); /* - * handle mapping disposal for uClinux + * split a vma into two pieces at address 'addr', a new vma is allocated either + * for the first part or the tail. 
*/ -static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma) +int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, int new_below) { - if (vma) { - down_write(&nommu_vma_sem); + struct vm_area_struct *new; + struct vm_region *region; + unsigned long npages; - if (atomic_dec_and_test(&vma->vm_usage)) { - delete_nommu_vma(vma); + kenter(""); - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); + /* we're only permitted to split anonymous regions that have a single + * owner */ + if (vma->vm_file || + atomic_read(&vma->vm_region->vm_usage) != 1) + return -ENOMEM; - /* IO memory and memory shared directly out of the pagecache from - * ramfs/tmpfs mustn't be released here */ - if (vma->vm_flags & VM_MAPPED_COPY) - kfree((void *) vma->vm_start); + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; - if (vma->vm_file) { - fput(vma->vm_file); - if (vma->vm_flags & VM_EXECUTABLE) - removed_exe_file_vma(mm); - } - kfree(vma); - } + region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); + if (!region) + return -ENOMEM; + + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new) { + kmem_cache_free(vm_region_jar, region); + return -ENOMEM; + } + + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + *region = *vma->vm_region; + new->vm_region = region; + + npages = (addr - vma->vm_start) >> PAGE_SHIFT; + + if (new_below) { + region->vm_end = new->vm_end = addr; + } else { + region->vm_start = new->vm_start = addr; + region->vm_pgoff = new->vm_pgoff += npages; + } - up_write(&nommu_vma_sem); + if (new->vm_ops && new->vm_ops->open) + new->vm_ops->open(new); + + delete_vma_from_mm(vma); + down_write(&nommu_region_sem); + delete_nommu_region(vma->vm_region); + if (new_below) { + vma->vm_region->vm_start = vma->vm_start = addr; + vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; + } else { + vma->vm_region->vm_end = vma->vm_end = addr; } + add_nommu_region(vma->vm_region); + add_nommu_region(new->vm_region); + up_write(&nommu_region_sem); + add_vma_to_mm(mm, vma); + add_vma_to_mm(mm, new); + return 0; } /* - * release a mapping - * - under NOMMU conditions the parameters must match exactly to the mapping to - * be removed + * shrink a VMA by removing the specified chunk from either the beginning or + * the end */ -int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) +static int shrink_vma(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long from, unsigned long to) { - struct vm_list_struct *vml, **parent; - unsigned long end = addr + len; + struct vm_region *region; -#ifdef DEBUG - printk("do_munmap:\n"); -#endif + kenter(""); - for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) { - if ((*parent)->vma->vm_start > addr) - break; - if ((*parent)->vma->vm_start == addr && - ((len == 0) || ((*parent)->vma->vm_end == end))) - goto found; - } + /* adjust the VMA's pointers, which may reposition it in the MM's tree + * and list */ + delete_vma_from_mm(vma); + if (from > vma->vm_start) + vma->vm_end = from; + else + vma->vm_start = to; + add_vma_to_mm(mm, vma); - printk("munmap of non-mmaped memory by process %d (%s): %p\n", - current->pid, current->comm, (void *) addr); - return -EINVAL; + /* cut the backing region down to size */ + region = vma->vm_region; + BUG_ON(atomic_read(®ion->vm_usage) != 1); - found: - vml = *parent; + down_write(&nommu_region_sem); + delete_nommu_region(region); + if (from > region->vm_start) + region->vm_end = from; + else + 
region->vm_start = to; + add_nommu_region(region); + up_write(&nommu_region_sem); - put_vma(mm, vml->vma); + free_page_series(from, to); + return 0; +} - *parent = vml->next; - kfree(vml); +/* + * release a mapping + * - under NOMMU conditions the chunk to be unmapped must be backed by a single + * VMA, though it need not cover the whole VMA + */ +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +{ + struct vm_area_struct *vma; + struct rb_node *rb; + unsigned long end = start + len; + int ret; - update_hiwater_vm(mm); - mm->total_vm -= len >> PAGE_SHIFT; + kenter(",%lx,%zx", start, len); -#ifdef DEBUG - show_process_blocks(); -#endif + if (len == 0) + return -EINVAL; + + /* find the first potentially overlapping VMA */ + vma = find_vma(mm, start); + if (!vma) { + printk(KERN_WARNING + "munmap of memory not mmapped by process %d (%s):" + " 0x%lx-0x%lx\n", + current->pid, current->comm, start, start + len - 1); + return -EINVAL; + } + /* we're allowed to split an anonymous VMA but not a file-backed one */ + if (vma->vm_file) { + do { + if (start > vma->vm_start) { + kleave(" = -EINVAL [miss]"); + return -EINVAL; + } + if (end == vma->vm_end) + goto erase_whole_vma; + rb = rb_next(&vma->vm_rb); + vma = rb_entry(rb, struct vm_area_struct, vm_rb); + } while (rb); + kleave(" = -EINVAL [split file]"); + return -EINVAL; + } else { + /* the chunk must be a subset of the VMA found */ + if (start == vma->vm_start && end == vma->vm_end) + goto erase_whole_vma; + if (start < vma->vm_start || end > vma->vm_end) { + kleave(" = -EINVAL [superset]"); + return -EINVAL; + } + if (start & ~PAGE_MASK) { + kleave(" = -EINVAL [unaligned start]"); + return -EINVAL; + } + if (end != vma->vm_end && end & ~PAGE_MASK) { + kleave(" = -EINVAL [unaligned split]"); + return -EINVAL; + } + if (start != vma->vm_start && end != vma->vm_end) { + ret = split_vma(mm, vma, start, 1); + if (ret < 0) { + kleave(" = %d [split]", ret); + return ret; + } + } + return shrink_vma(mm, vma, start, end); + } + +erase_whole_vma: + delete_vma_from_mm(vma); + delete_vma(mm, vma); + kleave(" = 0"); return 0; } EXPORT_SYMBOL(do_munmap); @@ -1184,29 +1552,26 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) } /* - * Release all mappings + * release all the mappings made in a process's VM space */ -void exit_mmap(struct mm_struct * mm) +void exit_mmap(struct mm_struct *mm) { - struct vm_list_struct *tmp; + struct vm_area_struct *vma; - if (mm) { -#ifdef DEBUG - printk("Exit_mmap:\n"); -#endif + if (!mm) + return; - mm->total_vm = 0; + kenter(""); - while ((tmp = mm->context.vmlist)) { - mm->context.vmlist = tmp->next; - put_vma(mm, tmp->vma); - kfree(tmp); - } + mm->total_vm = 0; -#ifdef DEBUG - show_process_blocks(); -#endif + while ((vma = mm->mmap)) { + mm->mmap = vma->vm_next; + delete_vma_from_mm(vma); + delete_vma(mm, vma); } + + kleave(""); } unsigned long do_brk(unsigned long addr, unsigned long len) @@ -1219,8 +1584,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len) * time (controlled by the MREMAP_MAYMOVE flag and available VM space) * * under NOMMU conditions, we only permit changing a mapping's size, and only - * as long as it stays within the hole allocated by the kmalloc() call in - * do_mmap_pgoff() and the block is not shareable + * as long as it stays within the region allocated by do_mmap_private() and the + * block is not shareable * * MREMAP_FIXED is not supported under NOMMU conditions */ @@ -1231,13 +1596,16 @@ unsigned long do_mremap(unsigned long addr, struct vm_area_struct 
*vma; /* insanity checks first */ - if (new_len == 0) + if (old_len == 0 || new_len == 0) return (unsigned long) -EINVAL; + if (addr & ~PAGE_MASK) + return -EINVAL; + if (flags & MREMAP_FIXED && new_addr != addr) return (unsigned long) -EINVAL; - vma = find_vma_exact(current->mm, addr); + vma = find_vma_exact(current->mm, addr, old_len); if (!vma) return (unsigned long) -EINVAL; @@ -1247,19 +1615,19 @@ unsigned long do_mremap(unsigned long addr, if (vma->vm_flags & VM_MAYSHARE) return (unsigned long) -EPERM; - if (new_len > kobjsize((void *) addr)) + if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) return (unsigned long) -ENOMEM; /* all checks complete - do it */ vma->vm_end = vma->vm_start + new_len; - return vma->vm_start; } EXPORT_SYMBOL(do_mremap); -asmlinkage unsigned long sys_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr) +asmlinkage +unsigned long sys_mremap(unsigned long addr, + unsigned long old_len, unsigned long new_len, + unsigned long flags, unsigned long new_addr) { unsigned long ret; -- cgit v1.2.2 From dd8632a12e500a684478fea0951f380478d56fed Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 8 Jan 2009 12:04:47 +0000 Subject: NOMMU: Make mmap allocation page trimming behaviour configurable. NOMMU mmap allocates a piece of memory for an mmap that's rounded up in size to the nearest power-of-2 number of pages. Currently it then discards the excess pages back to the page allocator, making that memory available for use by other things. This can, however, cause a greater amount of fragmentation. To counter this, a sysctl is added in order to fine-tune the trimming behaviour. The default behaviour remains to trim pages aggressively, while this can either be disabled completely or set to a higher page-granular watermark in order to have finer-grained control. The vm region vm_top bits are taken from an earlier patch by David Howells.
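The knob lands as /proc/sys/vm/nr_trim_pages. A sketch of the predicate it drives (a hypothetical helper; the real check is inline in do_mmap_private() in the hunk below):

/* 0 disables trimming, 1 (the default) trims any excess, and higher
 * values trim only when at least that many excess pages would be
 * recovered. */
static bool nommu_should_trim(unsigned long total_pages,
			      unsigned long wanted_pages)
{
	return sysctl_nr_trim_pages &&
	       total_pages - wanted_pages >= sysctl_nr_trim_pages;
}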
Signed-off-by: Paul Mundt Signed-off-by: David Howells Tested-by: Mike Frysinger --- mm/nommu.c | 65 ++++++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 0d363dfcf10e..a6e8ccfbd400 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -10,7 +10,7 @@ * Copyright (c) 2000-2003 David McCullough * Copyright (c) 2000-2001 D Jeff Dionne * Copyright (c) 2002 Greg Ungerer - * Copyright (c) 2007 Paul Mundt + * Copyright (c) 2007-2008 Paul Mundt */ #include @@ -66,6 +66,7 @@ atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; +int sysctl_nr_trim_pages = 1; /* page trimming behaviour */ int heap_stack_gap = 0; atomic_t mmap_pages_allocated; @@ -455,6 +456,8 @@ static noinline void validate_nommu_regions(void) last = rb_entry(lastp, struct vm_region, vm_rb); if (unlikely(last->vm_end <= last->vm_start)) BUG(); + if (unlikely(last->vm_top < last->vm_end)) + BUG(); while ((p = rb_next(lastp))) { region = rb_entry(p, struct vm_region, vm_rb); @@ -462,7 +465,9 @@ static noinline void validate_nommu_regions(void) if (unlikely(region->vm_end <= region->vm_start)) BUG(); - if (unlikely(region->vm_start < last->vm_end)) + if (unlikely(region->vm_top < region->vm_end)) + BUG(); + if (unlikely(region->vm_start < last->vm_top)) BUG(); lastp = p; @@ -536,7 +541,7 @@ static void free_page_series(unsigned long from, unsigned long to) /* * release a reference to a region * - the caller must hold the region semaphore, which this releases - * - the region may not have been added to the tree yet, in which case vm_end + * - the region may not have been added to the tree yet, in which case vm_top * will equal vm_start */ static void __put_nommu_region(struct vm_region *region) @@ -547,7 +552,7 @@ static void __put_nommu_region(struct vm_region *region) BUG_ON(!nommu_region_tree.rb_node); if (atomic_dec_and_test(®ion->vm_usage)) { - if (region->vm_end > region->vm_start) + if (region->vm_top > region->vm_start) delete_nommu_region(region); up_write(&nommu_region_sem); @@ -558,7 +563,7 @@ static void __put_nommu_region(struct vm_region *region) * from ramfs/tmpfs mustn't be released here */ if (region->vm_flags & VM_MAPPED_COPY) { kdebug("free series"); - free_page_series(region->vm_start, region->vm_end); + free_page_series(region->vm_start, region->vm_top); } kmem_cache_free(vm_region_jar, region); } else { @@ -999,6 +1004,10 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) int ret; ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); + if (ret == 0) { + vma->vm_region->vm_top = vma->vm_region->vm_end; + return ret; + } if (ret != -ENOSYS) return ret; @@ -1027,11 +1036,14 @@ static int do_mmap_private(struct vm_area_struct *vma, */ if (vma->vm_file) { ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); - if (ret != -ENOSYS) { + if (ret == 0) { /* shouldn't return success if we're not sharing */ - BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE)); - return ret; /* success or a real error */ + BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); + vma->vm_region->vm_top = vma->vm_region->vm_end; + return ret; } + if (ret != -ENOSYS) + return ret; /* getting an ENOSYS error indicates that direct mmap isn't * possible (as opposed to tried but failed) so we'll try to @@ -1051,23 +1063,25 @@ static int do_mmap_private(struct 
vm_area_struct *vma, if (!pages) goto enomem; - /* we allocated a power-of-2 sized page set, so we need to trim off the - * excess */ total = 1 << order; atomic_add(total, &mmap_pages_allocated); point = rlen >> PAGE_SHIFT; - while (total > point) { - order = ilog2(total - point); - n = 1 << order; - kdebug("shave %lu/%lu @%lu", n, total - point, total); - atomic_sub(n, &mmap_pages_allocated); - total -= n; - set_page_refcounted(pages + total); - __free_pages(pages + total, order); + + /* we allocated a power-of-2 sized page set, so we may want to trim off + * the excess */ + if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { + while (total > point) { + order = ilog2(total - point); + n = 1 << order; + kdebug("shave %lu/%lu @%lu", n, total - point, total); + atomic_sub(n, &mmap_pages_allocated); + total -= n; + set_page_refcounted(pages + total); + __free_pages(pages + total, order); + } } - total = rlen >> PAGE_SHIFT; for (point = 1; point < total; point++) set_page_refcounted(&pages[point]); @@ -1075,6 +1089,7 @@ static int do_mmap_private(struct vm_area_struct *vma, region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; region->vm_start = (unsigned long) base; region->vm_end = region->vm_start + rlen; + region->vm_top = region->vm_start + (total << PAGE_SHIFT); vma->vm_start = region->vm_start; vma->vm_end = region->vm_start + len; @@ -1110,6 +1125,7 @@ error_free: free_page_series(region->vm_start, region->vm_end); region->vm_start = vma->vm_start = 0; region->vm_end = vma->vm_end = 0; + region->vm_top = 0; return ret; enomem: @@ -1401,7 +1417,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, npages = (addr - vma->vm_start) >> PAGE_SHIFT; if (new_below) { - region->vm_end = new->vm_end = addr; + region->vm_top = region->vm_end = new->vm_end = addr; } else { region->vm_start = new->vm_start = addr; region->vm_pgoff = new->vm_pgoff += npages; @@ -1418,6 +1434,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; } else { vma->vm_region->vm_end = vma->vm_end = addr; + vma->vm_region->vm_top = addr; } add_nommu_region(vma->vm_region); add_nommu_region(new->vm_region); @@ -1454,10 +1471,12 @@ static int shrink_vma(struct mm_struct *mm, down_write(&nommu_region_sem); delete_nommu_region(region); - if (from > region->vm_start) - region->vm_end = from; - else + if (from > region->vm_start) { + to = region->vm_top; + region->vm_top = region->vm_end = from; + } else { region->vm_start = to; + } add_nommu_region(region); up_write(&nommu_region_sem); -- cgit v1.2.2 From ab2e83ead4eca9e045daac4cbf66eb1e7a244bb2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 8 Jan 2009 12:04:48 +0000 Subject: NOMMU: Teach kobjsize() about VMA regions. Now that we no longer use compound pages for all large allocations, kobjsize() actively breaks things like binfmt_flat by always handing back PAGE_SIZE for mmap'ed regions. Fix this up by looking up the VMA region for non-compounds. Ideally binfmt_flat wants to get rid of kobjsize() completely, but this is an incremental step. 
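A non-authoritative sketch of the resulting decision ladder in kobjsize(), compressed into a runnable userspace model. The struct fields stand in for PageSlab()/PageCompound() and find_vma(); the compound and final fallback branches are paraphrased from context rather than quoted:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct fake_page {
        int is_slab;                    /* PageSlab()     */
        int is_compound;                /* PageCompound() */
        unsigned long slab_objsize;     /* what ksize() would report */
        unsigned long compound_size;    /* full compound allocation  */
    };

    struct fake_vma {                   /* find_vma() result, if any */
        unsigned long vm_start, vm_end;
    };

    static unsigned long kobjsize_model(const struct fake_page *page,
                                        const struct fake_vma *vma)
    {
        if (page->is_slab)              /* slab object: ask ksize() */
            return page->slab_objsize;

        if (!page->is_compound && vma)  /* new: size from the VMA region */
            return vma->vm_end - vma->vm_start;

        if (page->is_compound)          /* large allocation */
            return page->compound_size;

        return PAGE_SIZE;               /* 0-order fallback */
    }

    int main(void)
    {
        struct fake_vma vma = { 0x10000, 0x15000 }; /* 5-page mapping */
        struct fake_page mmapped = { 0, 0, 0, 0 };

        /* before the patch this reported PAGE_SIZE; now the VMA answers */
        printf("mmap'ed object: %lu bytes\n",
               kobjsize_model(&mmapped, &vma));
        return 0;
    }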
Signed-off-by: Paul Mundt Signed-off-by: David Howells Tested-by: Mike Frysinger --- mm/nommu.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index a6e8ccfbd400..60ed8375c986 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -148,6 +148,20 @@ unsigned int kobjsize(const void *objp) if (PageSlab(page)) return ksize(objp); + /* + * If it's not a compound page, see if we have a matching VMA + * region. This test is intentionally done in reverse order, + * so if there's no VMA, we still fall through and hand back + * PAGE_SIZE for 0-order pages. + */ + if (!PageCompound(page)) { + struct vm_area_struct *vma; + + vma = find_vma(current->mm, (unsigned long)objp); + if (vma) + return vma->vm_end - vma->vm_start; + } + /* * The ksize() function is only guaranteed to work for pointers * returned by kmalloc(). So handle arbitrary pointers here. -- cgit v1.2.2 From 7a81b88cb53e335ff7d019e6398c95792c817d93 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:48 -0800 Subject: memcg: introduce charge-commit-cancel style of functions There is a small race in do_swap_page(). When the page swapped-in is charged, the mapcount can be greater than 0. But, at the same time some process (shares it ) call unmap and make mapcount 1->0 and the page is uncharged. CPUA CPUB mapcount == 1. (1) charge if mapcount==0 zap_pte_range() (2) mapcount 1 => 0. (3) uncharge(). (success) (4) set page's rmap() mapcount 0=>1 Then, this swap page's account is leaked. For fixing this, I added a new interface. - charge account to res_counter by PAGE_SIZE and try to free pages if necessary. - commit register page_cgroup and add to LRU if necessary. - cancel uncharge PAGE_SIZE because of do_swap_page failure. CPUA (1) charge (always) (2) set page's rmap (mapcount > 0) (3) commit charge was necessary or not after set_pte(). This protocol uses PCG_USED bit on page_cgroup for avoiding over accounting. Usual mem_cgroup_charge_common() does charge -> commit at a time. And this patch also adds following function to clarify all charges. - mem_cgroup_newpage_charge() ....replacement for mem_cgroup_charge() called against newly allocated anon pages. - mem_cgroup_charge_migrate_fixup() called only from remove_migration_ptes(). we'll have to rewrite this later.(this patch just keeps old behavior) This function will be removed by additional patch to make migration clearer. Good for clarifying "what we do" Then, we have 4 following charge points. - newpage - swap-in - add-to-cache. - migration. [akpm@linux-foundation.org: add missing inline directives to stubs] Signed-off-by: KAMEZAWA Hiroyuki Reviewed-by: Daisuke Nishimura Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 155 ++++++++++++++++++++++++++++++++++++++++++++------------ mm/memory.c | 12 +++-- mm/migrate.c | 2 +- mm/swapfile.c | 6 ++- 4 files changed, 136 insertions(+), 39 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 51ee96545579..f568b1964551 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -467,35 +467,31 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, return nr_taken; } -/* - * Charge the memory controller for page usage. - * Return - * 0 if the charge was successful - * < 0 if the cgroup is over its limit + +/** + * mem_cgroup_try_charge - get charge of PAGE_SIZE. + * @mm: an mm_struct which is charged against. (when *memcg is NULL) + * @gfp_mask: gfp_mask for reclaim. 
+ * @memcg: a pointer to memory cgroup which is charged against. + * + * charge against memory cgroup pointed by *memcg. if *memcg == NULL, estimated + * memory cgroup from @mm is got and stored in *memcg. + * + * Returns 0 if success. -ENOMEM at failure. */ -static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, enum charge_type ctype, - struct mem_cgroup *memcg) + +int mem_cgroup_try_charge(struct mm_struct *mm, + gfp_t gfp_mask, struct mem_cgroup **memcg) { struct mem_cgroup *mem; - struct page_cgroup *pc; - unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES; - struct mem_cgroup_per_zone *mz; - unsigned long flags; - - pc = lookup_page_cgroup(page); - /* can happen at boot */ - if (unlikely(!pc)) - return 0; - prefetchw(pc); + int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; /* * We always charge the cgroup the mm_struct belongs to. * The mm_struct's mem_cgroup changes on task migration if the * thread group leader migrates. It's possible that mm is not * set, if so charge the init_mm (happens for pagecache usage). */ - - if (likely(!memcg)) { + if (likely(!*memcg)) { rcu_read_lock(); mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (unlikely(!mem)) { @@ -506,15 +502,17 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, * For every charge from the cgroup, increment reference count */ css_get(&mem->css); + *memcg = mem; rcu_read_unlock(); } else { - mem = memcg; - css_get(&memcg->css); + mem = *memcg; + css_get(&mem->css); } + while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) { if (!(gfp_mask & __GFP_WAIT)) - goto out; + goto nomem; if (try_to_free_mem_cgroup_pages(mem, gfp_mask)) continue; @@ -531,18 +529,37 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, if (!nr_retries--) { mem_cgroup_out_of_memory(mem, gfp_mask); - goto out; + goto nomem; } } + return 0; +nomem: + css_put(&mem->css); + return -ENOMEM; +} + +/* + * commit a charge got by mem_cgroup_try_charge() and makes page_cgroup to be + * USED state. If already USED, uncharge and return. + */ + +static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, + struct page_cgroup *pc, + enum charge_type ctype) +{ + struct mem_cgroup_per_zone *mz; + unsigned long flags; + /* try_charge() can return NULL to *memcg, taking care of it. */ + if (!mem) + return; lock_page_cgroup(pc); if (unlikely(PageCgroupUsed(pc))) { unlock_page_cgroup(pc); res_counter_uncharge(&mem->res, PAGE_SIZE); css_put(&mem->css); - - goto done; + return; } pc->mem_cgroup = mem; /* @@ -557,15 +574,39 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, __mem_cgroup_add_list(mz, pc); spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); +} -done: +/* + * Charge the memory controller for page usage. 
+ * Return + * 0 if the charge was successful + * < 0 if the cgroup is over its limit + */ +static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, enum charge_type ctype, + struct mem_cgroup *memcg) +{ + struct mem_cgroup *mem; + struct page_cgroup *pc; + int ret; + + pc = lookup_page_cgroup(page); + /* can happen at boot */ + if (unlikely(!pc)) + return 0; + prefetchw(pc); + + mem = memcg; + ret = mem_cgroup_try_charge(mm, gfp_mask, &mem); + if (ret) + return ret; + + __mem_cgroup_commit_charge(mem, pc, ctype); return 0; -out: - css_put(&mem->css); - return -ENOMEM; } -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) +int mem_cgroup_newpage_charge(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) { if (mem_cgroup_subsys.disabled) return 0; @@ -586,6 +627,34 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL); } +/* + * same as mem_cgroup_newpage_charge(), now. + * But what we assume is different from newpage, and this is special case. + * treat this in special function. easy for maintenance. + */ + +int mem_cgroup_charge_migrate_fixup(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) +{ + if (mem_cgroup_subsys.disabled) + return 0; + + if (PageCompound(page)) + return 0; + + if (page_mapped(page) || (page->mapping && !PageAnon(page))) + return 0; + + if (unlikely(!mm)) + mm = &init_mm; + + return mem_cgroup_charge_common(page, mm, gfp_mask, + MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL); +} + + + + int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { @@ -628,6 +697,30 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); } + +void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) +{ + struct page_cgroup *pc; + + if (mem_cgroup_subsys.disabled) + return; + if (!ptr) + return; + pc = lookup_page_cgroup(page); + __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED); +} + +void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) +{ + if (mem_cgroup_subsys.disabled) + return; + if (!mem) + return; + res_counter_uncharge(&mem->res, PAGE_SIZE); + css_put(&mem->css); +} + + /* * uncharge if !page_mapped(page) */ diff --git a/mm/memory.c b/mm/memory.c index 3f8fa06b963b..7f210f160990 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2000,7 +2000,7 @@ gotten: cow_user_page(new_page, old_page, address, vma); __SetPageUptodate(new_page); - if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) + if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) goto oom_free_new; /* @@ -2392,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page; swp_entry_t entry; pte_t pte; + struct mem_cgroup *ptr = NULL; int ret = 0; if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) @@ -2430,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { + if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) { ret = VM_FAULT_OOM; unlock_page(page); goto out; @@ -2460,6 +2461,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, flush_icache_page(vma, page); set_pte_at(mm, address, page_table, pte); page_add_anon_rmap(page, vma, address); + mem_cgroup_commit_charge_swapin(page, ptr); swap_free(entry); if (vm_swap_full() || (vma->vm_flags & 
VM_LOCKED) || PageMlocked(page)) @@ -2480,7 +2482,7 @@ unlock: out: return ret; out_nomap: - mem_cgroup_uncharge_page(page); + mem_cgroup_cancel_charge_swapin(ptr); pte_unmap_unlock(page_table, ptl); unlock_page(page); page_cache_release(page); @@ -2510,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, goto oom; __SetPageUptodate(page); - if (mem_cgroup_charge(page, mm, GFP_KERNEL)) + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) goto oom_free_page; entry = mk_pte(page, vma->vm_page_prot); @@ -2601,7 +2603,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ret = VM_FAULT_OOM; goto out; } - if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { ret = VM_FAULT_OOM; page_cache_release(page); goto out; diff --git a/mm/migrate.c b/mm/migrate.c index 55373983c9c6..246dcb973ae7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -133,7 +133,7 @@ static void remove_migration_pte(struct vm_area_struct *vma, * be reliable, and this charge can actually fail: oh well, we don't * make the situation any worse by proceeding as if it had succeeded. */ - mem_cgroup_charge(new, mm, GFP_ATOMIC); + mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC); get_page(new); pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); diff --git a/mm/swapfile.c b/mm/swapfile.c index eec5ca758a23..fb926efb5167 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -690,17 +690,18 @@ unsigned int count_swap_pages(int type, int free) static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct page *page) { + struct mem_cgroup *ptr = NULL; spinlock_t *ptl; pte_t *pte; int ret = 1; - if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) + if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr)) ret = -ENOMEM; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { if (ret > 0) - mem_cgroup_uncharge_page(page); + mem_cgroup_cancel_charge_swapin(ptr); ret = 0; goto out; } @@ -710,6 +711,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, set_pte_at(vma->vm_mm, addr, pte, pte_mkold(mk_pte(page, vma->vm_page_prot))); page_add_anon_rmap(page, vma, addr); + mem_cgroup_commit_charge_swapin(page, ptr); swap_free(entry); /* * Move the page to the active list so it is not -- cgit v1.2.2 From bced0520fe462bb94021dcabd32e99630c171be2 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:49 -0800 Subject: memcg: fix gfp_mask of callers of charge Fix misuse of gfp_kernel. Now, most of callers of mem_cgroup_charge_xxx functions uses GFP_KERNEL. I think that this is from the fact that page_cgroup *was* dynamically allocated. But now, we allocate all page_cgroup at boot. And mem_cgroup_try_to_free_pages() reclaim memory from GFP_HIGHUSER_MOVABLE + specified GFP_RECLAIM_MASK. * This is because we just want to reduce memory usage. "Where we should reclaim from ?" is not a problem in memcg. This patch modifies gfp masks to be GFP_HIGUSER_MOVABLE if possible. Note: This patch is not for fixing behavior but for showing sane information in source code. 
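A hedged illustration of why the mask swap changes documentation rather than behaviour for the charge path: the try-charge loop tests only __GFP_WAIT before deciding whether it may reclaim, and both GFP_KERNEL and GFP_HIGHUSER_MOVABLE carry that bit. The bit values below are placeholders, not the real gfp.h ones; only the set relationships are meant to mirror this era's definitions:

    #include <stdio.h>

    /* placeholder bit values; composition mirrors 2.6.28-era gfp.h */
    #define __GFP_WAIT      0x01u
    #define __GFP_IO        0x02u
    #define __GFP_FS        0x04u
    #define __GFP_HARDWALL  0x08u
    #define __GFP_HIGHMEM   0x10u
    #define __GFP_MOVABLE   0x20u

    #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
    #define GFP_HIGHUSER_MOVABLE \
        (GFP_KERNEL | __GFP_HARDWALL | __GFP_HIGHMEM | __GFP_MOVABLE)

    /* mirrors the charge loop's only gfp test:
     *     if (!(gfp_mask & __GFP_WAIT)) goto nomem;          */
    static const char *charge_behaviour(unsigned int gfp)
    {
        return (gfp & __GFP_WAIT) ? "may reclaim and retry"
                                  : "fails without reclaim";
    }

    int main(void)
    {
        printf("GFP_KERNEL:           %s\n", charge_behaviour(GFP_KERNEL));
        printf("GFP_HIGHUSER_MOVABLE: %s\n",
               charge_behaviour(GFP_HIGHUSER_MOVABLE));
        return 0;
    }

Both masks allow reclaim, so the charge path runs the same; the highmem/movable bits merely record what kind of pages these charges actually back.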
Signed-off-by: KAMEZAWA Hiroyuki Reviewed-by: Daisuke Nishimura Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 8 +++++--- mm/memory.c | 9 +++++---- mm/shmem.c | 6 +++--- mm/swapfile.c | 2 +- 4 files changed, 14 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f568b1964551..c34eb52bdc3f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -808,8 +808,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) } unlock_page_cgroup(pc); if (mem) { - ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL, - ctype, mem); + ret = mem_cgroup_charge_common(newpage, NULL, + GFP_HIGHUSER_MOVABLE, + ctype, mem); css_put(&mem->css); } return ret; @@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, ret = -EBUSY; break; } - progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL); + progress = try_to_free_mem_cgroup_pages(memcg, + GFP_HIGHUSER_MOVABLE); if (!progress) retry_count--; } diff --git a/mm/memory.c b/mm/memory.c index 7f210f160990..ba5189e322e6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2000,7 +2000,7 @@ gotten: cow_user_page(new_page, old_page, address, vma); __SetPageUptodate(new_page); - if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) + if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE)) goto oom_free_new; /* @@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) { + if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) { ret = VM_FAULT_OOM; unlock_page(page); goto out; @@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, goto oom; __SetPageUptodate(page); - if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) + if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE)) goto oom_free_page; entry = mk_pte(page, vma->vm_page_prot); @@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ret = VM_FAULT_OOM; goto out; } - if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { + if (mem_cgroup_newpage_charge(page, + mm, GFP_HIGHUSER_MOVABLE)) { ret = VM_FAULT_OOM; page_cache_release(page); goto out; diff --git a/mm/shmem.c b/mm/shmem.c index 5941f9801363..bd9b4ea307b2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -928,8 +928,8 @@ found: error = 1; if (!inode) goto out; - /* Precharge page using GFP_KERNEL while we can wait */ - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + /* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE); if (error) goto out; error = radix_tree_preload(GFP_KERNEL); @@ -1379,7 +1379,7 @@ repeat: /* Precharge page while we can wait, compensate after */ error = mem_cgroup_cache_charge(filepage, current->mm, - gfp & ~__GFP_HIGHMEM); + GFP_HIGHUSER_MOVABLE); if (error) { page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); diff --git a/mm/swapfile.c b/mm/swapfile.c index fb926efb5167..ddc6d92be2cb 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, pte_t *pte; int ret = 1; - if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr)) + if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr)) ret = -ENOMEM; pte = 
pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); -- cgit v1.2.2 From 01b1ae63c2270cbacfd43fea94578c17950eb548 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:50 -0800 Subject: memcg: simple migration handling Now, management of "charge" under page migration is done under following manner. (Assume migrate page contents from oldpage to newpage) before - "newpage" is charged before migration. at success. - "oldpage" is uncharged at somewhere(unmap, radix-tree-replace) at failure - "newpage" is uncharged. - "oldpage" is charged if necessary (*1) But (*1) is not reliable....because of GFP_ATOMIC. This patch tries to change behavior as following by charge/commit/cancel ops. before - charge PAGE_SIZE (no target page) success - commit charge against "newpage". failure - commit charge against "oldpage". (PCG_USED bit works effectively to avoid double-counting) - if "oldpage" is obsolete, cancel charge of PAGE_SIZE. Signed-off-by: KAMEZAWA Hiroyuki Reviewed-by: Daisuke Nishimura Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 108 +++++++++++++++++++++++++++----------------------------- mm/migrate.c | 42 ++++++++-------------- 2 files changed, 66 insertions(+), 84 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c34eb52bdc3f..b71195e8198b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -627,34 +627,6 @@ int mem_cgroup_newpage_charge(struct page *page, MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL); } -/* - * same as mem_cgroup_newpage_charge(), now. - * But what we assume is different from newpage, and this is special case. - * treat this in special function. easy for maintenance. - */ - -int mem_cgroup_charge_migrate_fixup(struct page *page, - struct mm_struct *mm, gfp_t gfp_mask) -{ - if (mem_cgroup_subsys.disabled) - return 0; - - if (PageCompound(page)) - return 0; - - if (page_mapped(page) || (page->mapping && !PageAnon(page))) - return 0; - - if (unlikely(!mm)) - mm = &init_mm; - - return mem_cgroup_charge_common(page, mm, gfp_mask, - MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL); -} - - - - int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { @@ -697,7 +669,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); } - void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) { struct page_cgroup *pc; @@ -782,13 +753,13 @@ void mem_cgroup_uncharge_cache_page(struct page *page) } /* - * Before starting migration, account against new page. + * Before starting migration, account PAGE_SIZE to mem_cgroup that the old + * page belongs to. 
*/ -int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) +int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) { struct page_cgroup *pc; struct mem_cgroup *mem = NULL; - enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; int ret = 0; if (mem_cgroup_subsys.disabled) @@ -799,42 +770,67 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) if (PageCgroupUsed(pc)) { mem = pc->mem_cgroup; css_get(&mem->css); - if (PageCgroupCache(pc)) { - if (page_is_file_cache(page)) - ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; - else - ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; - } } unlock_page_cgroup(pc); + if (mem) { - ret = mem_cgroup_charge_common(newpage, NULL, - GFP_HIGHUSER_MOVABLE, - ctype, mem); + ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem); css_put(&mem->css); } + *ptr = mem; return ret; } /* remove redundant charge if migration failed*/ -void mem_cgroup_end_migration(struct page *newpage) +void mem_cgroup_end_migration(struct mem_cgroup *mem, + struct page *oldpage, struct page *newpage) { + struct page *target, *unused; + struct page_cgroup *pc; + enum charge_type ctype; + + if (!mem) + return; + + /* at migration success, oldpage->mapping is NULL. */ + if (oldpage->mapping) { + target = oldpage; + unused = NULL; + } else { + target = newpage; + unused = oldpage; + } + + if (PageAnon(target)) + ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; + else if (page_is_file_cache(target)) + ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; + else + ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; + + /* unused page is not on radix-tree now. */ + if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED) + __mem_cgroup_uncharge_common(unused, ctype); + + pc = lookup_page_cgroup(target); /* - * At success, page->mapping is not NULL. - * special rollback care is necessary when - * 1. at migration failure. (newpage->mapping is cleared in this case) - * 2. the newpage was moved but not remapped again because the task - * exits and the newpage is obsolete. In this case, the new page - * may be a swapcache. So, we just call mem_cgroup_uncharge_page() - * always for avoiding mess. The page_cgroup will be removed if - * unnecessary. File cache pages is still on radix-tree. Don't - * care it. + * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup. + * So, double-counting is effectively avoided. + */ + __mem_cgroup_commit_charge(mem, pc, ctype); + + /* + * Both of oldpage and newpage are still under lock_page(). + * Then, we don't have to care about race in radix-tree. + * But we have to be careful that this page is unmapped or not. + * + * There is a case for !page_mapped(). At the start of + * migration, oldpage was mapped. But now, it's zapped. + * But we know *target* page is not freed/reused under us. + * mem_cgroup_uncharge_page() does all necessary checks. */ - if (!newpage->mapping) - __mem_cgroup_uncharge_common(newpage, - MEM_CGROUP_CHARGE_TYPE_FORCE); - else if (PageAnon(newpage)) - mem_cgroup_uncharge_page(newpage); + if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) + mem_cgroup_uncharge_page(target); } /* diff --git a/mm/migrate.c b/mm/migrate.c index 246dcb973ae7..a30ea5fcf9f1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma, if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) goto out; - /* - * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge. 
- * Failure is not an option here: we're now expected to remove every - * migration pte, and will cause crashes otherwise. Normally this - * is not an issue: mem_cgroup_prepare_migration bumped up the old - * page_cgroup count for safety, that's now attached to the new page, - * so this charge should just be another incrementation of the count, - * to keep in balance with rmap.c's mem_cgroup_uncharging. But if - * there's been a force_empty, those reference counts may no longer - * be reliable, and this charge can actually fail: oh well, we don't - * make the situation any worse by proceeding as if it had succeeded. - */ - mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC); - get_page(new); pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); if (is_write_migration_entry(entry)) @@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page) anon = PageAnon(page); page->mapping = NULL; - if (!anon) /* This page was removed from radix-tree. */ - mem_cgroup_uncharge_cache_page(page); - /* * If any waiters have accumulated on the new page then * wake them up. @@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, struct page *newpage = get_new_page(page, private, &result); int rcu_locked = 0; int charge = 0; + struct mem_cgroup *mem; if (!newpage) return -ENOMEM; @@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, goto move_newpage; } - charge = mem_cgroup_prepare_migration(page, newpage); - if (charge == -ENOMEM) { - rc = -ENOMEM; - goto move_newpage; - } /* prepare cgroup just returns 0 or -ENOMEM */ - BUG_ON(charge); - rc = -EAGAIN; + if (!trylock_page(page)) { if (!force) goto move_newpage; lock_page(page); } + /* charge against new page */ + charge = mem_cgroup_prepare_migration(page, &mem); + if (charge == -ENOMEM) { + rc = -ENOMEM; + goto unlock; + } + BUG_ON(charge); + if (PageWriteback(page)) { if (!force) - goto unlock; + goto uncharge; wait_on_page_writeback(page); } /* @@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, rcu_unlock: if (rcu_locked) rcu_read_unlock(); - +uncharge: + if (!charge) + mem_cgroup_end_migration(mem, page, newpage); unlock: unlock_page(page); @@ -709,8 +697,6 @@ unlock: } move_newpage: - if (!charge) - mem_cgroup_end_migration(newpage); /* * Move the new page to the LRU. If migration was not successful -- cgit v1.2.2 From 0753b0ef3b301895234fed02bea2c099c7ff4feb Mon Sep 17 00:00:00 2001 From: Fernando Luis Vazquez Cao Date: Wed, 7 Jan 2009 18:07:51 -0800 Subject: memcg: do not recalculate section unnecessarily in init_section_page_cgroup In init_section_page_cgroup() the section a given pfn belongs to is calculated at the top of the function and, despite the fact that the pfn/section correspondence does not change, it is recalculated further down the same function. By computing this just once and reusing that value we save some bytes in the object file and do not waste CPU cycles. 
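The shape of the fix, as a tiny illustrative program (names are simplified stand-ins for __pfn_to_section()): the section is a pure function of the pfn, so computing it once at the top makes the later recomputation dead code.

    #include <stdio.h>

    #define PAGES_PER_SECTION 4096UL    /* illustrative constant */

    /* stand-in for __pfn_to_section(): pure in its argument */
    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
        return pfn / PAGES_PER_SECTION;
    }

    int main(void)
    {
        unsigned long pfn = 123456;
        /* hoisted: computed once, valid for the whole function */
        unsigned long section = pfn_to_section_nr(pfn);

        /* ... long initialisation body; pfn never changes, so the
         * section need not be recomputed before the final store ... */
        printf("pfn %lu -> section %lu\n", pfn, section);
        return 0;
    }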
Signed-off-by: Fernando Luis Vazquez Cao Reviewed-by: KAMEZAWA Hiroyuki Cc: Daisuke Nishimura Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_cgroup.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index d6507a660ed6..df1e54a5ed19 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -103,13 +103,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) /* __alloc_bootmem...() is protected by !slab_available() */ static int __init_refok init_section_page_cgroup(unsigned long pfn) { - struct mem_section *section; + struct mem_section *section = __pfn_to_section(pfn); struct page_cgroup *base, *pc; unsigned long table_size; int nid, index; - section = __pfn_to_section(pfn); - if (!section->page_cgroup) { nid = page_to_nid(pfn_to_page(pfn)); table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; @@ -145,7 +143,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn) __init_page_cgroup(pc, pfn + index); } - section = __pfn_to_section(pfn); section->page_cgroup = base - pfn; total_usage += table_size; return 0; -- cgit v1.2.2 From f817ed48535ac6510ebae7c4116f24a5f9268834 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:53 -0800 Subject: memcg: move all acccounting to parent at rmdir() This patch provides a function to move account information of a page between mem_cgroups and rewrite force_empty to make use of this. This moving of page_cgroup is done under - lru_lock of source/destination mem_cgroup is held. - lock_page_cgroup() is held. Then, a routine which touches pc->mem_cgroup without lock_page_cgroup() should confirm pc->mem_cgroup is still valid or not. Typical code can be following. (while page is not under lock_page()) mem = pc->mem_cgroup; mz = page_cgroup_zoneinfo(pc) spin_lock_irqsave(&mz->lru_lock); if (pc->mem_cgroup == mem) ...../* some list handling */ spin_unlock_irqrestore(&mz->lru_lock); Of course, better way is lock_page_cgroup(pc); .... unlock_page_cgroup(pc); But you should confirm the nest of lock and avoid deadlock. If you treats page_cgroup from mem_cgroup's LRU under mz->lru_lock, you don't have to worry about what pc->mem_cgroup points to. moved pages are added to head of lru, not to tail. Expected users of this routine is: - force_empty (rmdir) - moving tasks between cgroup (for moving account information.) - hierarchy (maybe useful.) force_empty(rmdir) uses this move_account and move pages to its parent. This "move" will not cause OOM (I added "oom" parameter to try_charge().) If the parent is busy (not enough memory), force_empty calls try_to_free_page() and reduce usage. Purpose of this behavior is - Fix "forget all" behavior of force_empty and avoid leak of accounting. - By "moving first, free if necessary", keep pages on memory as much as possible. Adding a switch to change behavior of force_empty to - free first, move if necessary - free all, if there is mlocked/busy pages, return -EBUSY. is under consideration. (I'll add if someone requtests.) This patch also removes memory.force_empty file, a brutal debug-only interface. 
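The locking rule spelled out in the message above can be shown as a runnable toy: sample pc->mem_cgroup without the lock, then revalidate it once the lru_lock is held, because a concurrent mem_cgroup_move_account() may have re-pointed the page_cgroup in between. This is a hedged userspace model (build with -pthread) in which a mutex stands in for the per-zone lru_lock:

    #include <pthread.h>
    #include <stdio.h>

    struct mem_cgroup {
        const char *name;
        pthread_mutex_t lru_lock;       /* per-zone lru_lock stand-in */
    };

    struct page_cgroup {
        struct mem_cgroup *mem_cgroup;
    };

    /* caller sampled 'mem' from pc->mem_cgroup WITHOUT the lock; it must
     * be revalidated under the lock before touching mem's LRU lists */
    static void handle_on_lru(struct page_cgroup *pc, struct mem_cgroup *mem)
    {
        pthread_mutex_lock(&mem->lru_lock);
        if (pc->mem_cgroup == mem)
            printf("pc still belongs to %s: safe to handle\n", mem->name);
        else
            printf("pc moved away from %s: bail out or retry\n", mem->name);
        pthread_mutex_unlock(&mem->lru_lock);
    }

    int main(void)
    {
        struct mem_cgroup child  = { "child",  PTHREAD_MUTEX_INITIALIZER };
        struct mem_cgroup parent = { "parent", PTHREAD_MUTEX_INITIALIZER };
        struct page_cgroup pc = { &child };

        struct mem_cgroup *mem = pc.mem_cgroup;  /* unlocked sample */
        handle_on_lru(&pc, mem);

        pc.mem_cgroup = &parent;    /* simulate a racing move_account() */
        handle_on_lru(&pc, mem);    /* stale sample is caught under lock */
        return 0;
    }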
Reviewed-by: Daisuke Nishimura Tested-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 277 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 210 insertions(+), 67 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b71195e8198b..49234d93988a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -257,7 +257,7 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, } static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, - struct page_cgroup *pc) + struct page_cgroup *pc, bool hot) { int lru = LRU_BASE; @@ -271,7 +271,10 @@ static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, } MEM_CGROUP_ZSTAT(mz, lru) += 1; - list_add(&pc->lru, &mz->lists[lru]); + if (hot) + list_add(&pc->lru, &mz->lists[lru]); + else + list_add_tail(&pc->lru, &mz->lists[lru]); mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true); } @@ -467,21 +470,12 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, return nr_taken; } - -/** - * mem_cgroup_try_charge - get charge of PAGE_SIZE. - * @mm: an mm_struct which is charged against. (when *memcg is NULL) - * @gfp_mask: gfp_mask for reclaim. - * @memcg: a pointer to memory cgroup which is charged against. - * - * charge against memory cgroup pointed by *memcg. if *memcg == NULL, estimated - * memory cgroup from @mm is got and stored in *memcg. - * - * Returns 0 if success. -ENOMEM at failure. +/* + * Unlike exported interface, "oom" parameter is added. if oom==true, + * oom-killer can be invoked. */ - -int mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcg) +static int __mem_cgroup_try_charge(struct mm_struct *mm, + gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) { struct mem_cgroup *mem; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; @@ -528,7 +522,8 @@ int mem_cgroup_try_charge(struct mm_struct *mm, continue; if (!nr_retries--) { - mem_cgroup_out_of_memory(mem, gfp_mask); + if (oom) + mem_cgroup_out_of_memory(mem, gfp_mask); goto nomem; } } @@ -538,6 +533,25 @@ nomem: return -ENOMEM; } +/** + * mem_cgroup_try_charge - get charge of PAGE_SIZE. + * @mm: an mm_struct which is charged against. (when *memcg is NULL) + * @gfp_mask: gfp_mask for reclaim. + * @memcg: a pointer to memory cgroup which is charged against. + * + * charge against memory cgroup pointed by *memcg. if *memcg == NULL, estimated + * memory cgroup from @mm is got and stored in *memcg. + * + * Returns 0 if success. -ENOMEM at failure. + * This call can invoke OOM-Killer. + */ + +int mem_cgroup_try_charge(struct mm_struct *mm, + gfp_t mask, struct mem_cgroup **memcg) +{ + return __mem_cgroup_try_charge(mm, mask, memcg, true); +} + /* * commit a charge got by mem_cgroup_try_charge() and makes page_cgroup to be * USED state. If already USED, uncharge and return. @@ -571,11 +585,109 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, mz = page_cgroup_zoneinfo(pc); spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_add_list(mz, pc); + __mem_cgroup_add_list(mz, pc, true); spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); } +/** + * mem_cgroup_move_account - move account of the page + * @pc: page_cgroup of the page. + * @from: mem_cgroup which the page is moved from. + * @to: mem_cgroup which the page is moved to. @from != @to. + * + * The caller must confirm following. + * 1. disable irq. + * 2. 
lru_lock of old mem_cgroup(@from) should be held. + * + * returns 0 at success, + * returns -EBUSY when lock is busy or "pc" is unstable. + * + * This function does "uncharge" from old cgroup but doesn't do "charge" to + * new cgroup. It should be done by a caller. + */ + +static int mem_cgroup_move_account(struct page_cgroup *pc, + struct mem_cgroup *from, struct mem_cgroup *to) +{ + struct mem_cgroup_per_zone *from_mz, *to_mz; + int nid, zid; + int ret = -EBUSY; + + VM_BUG_ON(!irqs_disabled()); + VM_BUG_ON(from == to); + + nid = page_cgroup_nid(pc); + zid = page_cgroup_zid(pc); + from_mz = mem_cgroup_zoneinfo(from, nid, zid); + to_mz = mem_cgroup_zoneinfo(to, nid, zid); + + + if (!trylock_page_cgroup(pc)) + return ret; + + if (!PageCgroupUsed(pc)) + goto out; + + if (pc->mem_cgroup != from) + goto out; + + if (spin_trylock(&to_mz->lru_lock)) { + __mem_cgroup_remove_list(from_mz, pc); + css_put(&from->css); + res_counter_uncharge(&from->res, PAGE_SIZE); + pc->mem_cgroup = to; + css_get(&to->css); + __mem_cgroup_add_list(to_mz, pc, false); + ret = 0; + spin_unlock(&to_mz->lru_lock); + } +out: + unlock_page_cgroup(pc); + return ret; +} + +/* + * move charges to its parent. + */ + +static int mem_cgroup_move_parent(struct page_cgroup *pc, + struct mem_cgroup *child, + gfp_t gfp_mask) +{ + struct cgroup *cg = child->css.cgroup; + struct cgroup *pcg = cg->parent; + struct mem_cgroup *parent; + struct mem_cgroup_per_zone *mz; + unsigned long flags; + int ret; + + /* Is ROOT ? */ + if (!pcg) + return -EINVAL; + + parent = mem_cgroup_from_cont(pcg); + + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); + if (ret) + return ret; + + mz = mem_cgroup_zoneinfo(child, + page_cgroup_nid(pc), page_cgroup_zid(pc)); + + spin_lock_irqsave(&mz->lru_lock, flags); + ret = mem_cgroup_move_account(pc, child, parent); + spin_unlock_irqrestore(&mz->lru_lock, flags); + + /* drop extra refcnt */ + css_put(&parent->css); + /* uncharge if move fails */ + if (ret) + res_counter_uncharge(&parent->res, PAGE_SIZE); + + return ret; +} + /* * Charge the memory controller for page usage. * Return @@ -597,7 +709,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, prefetchw(pc); mem = memcg; - ret = mem_cgroup_try_charge(mm, gfp_mask, &mem); + ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true); if (ret) return ret; @@ -899,46 +1011,52 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, * This routine traverse page_cgroup in given list and drop them all. * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 
*/ -#define FORCE_UNCHARGE_BATCH (128) -static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, +static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct mem_cgroup_per_zone *mz, enum lru_list lru) { - struct page_cgroup *pc; - struct page *page; - int count = FORCE_UNCHARGE_BATCH; + struct page_cgroup *pc, *busy; unsigned long flags; + unsigned long loop; struct list_head *list; + int ret = 0; list = &mz->lists[lru]; - spin_lock_irqsave(&mz->lru_lock, flags); - while (!list_empty(list)) { - pc = list_entry(list->prev, struct page_cgroup, lru); - page = pc->page; - if (!PageCgroupUsed(pc)) + loop = MEM_CGROUP_ZSTAT(mz, lru); + /* give some margin against EBUSY etc...*/ + loop += 256; + busy = NULL; + while (loop--) { + ret = 0; + spin_lock_irqsave(&mz->lru_lock, flags); + if (list_empty(list)) { + spin_unlock_irqrestore(&mz->lru_lock, flags); break; - get_page(page); + } + pc = list_entry(list->prev, struct page_cgroup, lru); + if (busy == pc) { + list_move(&pc->lru, list); + busy = 0; + spin_unlock_irqrestore(&mz->lru_lock, flags); + continue; + } spin_unlock_irqrestore(&mz->lru_lock, flags); - /* - * Check if this page is on LRU. !LRU page can be found - * if it's under page migration. - */ - if (PageLRU(page)) { - __mem_cgroup_uncharge_common(page, - MEM_CGROUP_CHARGE_TYPE_FORCE); - put_page(page); - if (--count <= 0) { - count = FORCE_UNCHARGE_BATCH; - cond_resched(); - } - } else { - spin_lock_irqsave(&mz->lru_lock, flags); + + ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE); + if (ret == -ENOMEM) break; - } - spin_lock_irqsave(&mz->lru_lock, flags); + + if (ret == -EBUSY || ret == -EINVAL) { + /* found lock contention or "pc" is obsolete. */ + busy = pc; + cond_resched(); + } else + busy = NULL; } - spin_unlock_irqrestore(&mz->lru_lock, flags); + if (!ret && !list_empty(list)) + return -EBUSY; + return ret; } /* @@ -947,34 +1065,68 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, */ static int mem_cgroup_force_empty(struct mem_cgroup *mem) { - int ret = -EBUSY; - int node, zid; + int ret; + int node, zid, shrink; + int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; css_get(&mem->css); - /* - * page reclaim code (kswapd etc..) will move pages between - * active_list <-> inactive_list while we don't take a lock. - * So, we have to do loop here until all lists are empty. - */ + + shrink = 0; +move_account: while (mem->res.usage > 0) { + ret = -EBUSY; if (atomic_read(&mem->css.cgroup->count) > 0) goto out; + /* This is for making all *used* pages to be on LRU. */ lru_add_drain_all(); - for_each_node_state(node, N_POSSIBLE) - for (zid = 0; zid < MAX_NR_ZONES; zid++) { + ret = 0; + for_each_node_state(node, N_POSSIBLE) { + for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { struct mem_cgroup_per_zone *mz; enum lru_list l; mz = mem_cgroup_zoneinfo(mem, node, zid); - for_each_lru(l) - mem_cgroup_force_empty_list(mem, mz, l); + for_each_lru(l) { + ret = mem_cgroup_force_empty_list(mem, + mz, l); + if (ret) + break; + } } + if (ret) + break; + } + /* it seems parent cgroup doesn't have enough mem */ + if (ret == -ENOMEM) + goto try_to_free; cond_resched(); } ret = 0; out: css_put(&mem->css); return ret; + +try_to_free: + /* returns EBUSY if we come here twice. 
*/ + if (shrink) { + ret = -EBUSY; + goto out; + } + /* try to free all pages in this cgroup */ + shrink = 1; + while (nr_retries && mem->res.usage > 0) { + int progress; + progress = try_to_free_mem_cgroup_pages(mem, + GFP_HIGHUSER_MOVABLE); + if (!progress) + nr_retries--; + + } + /* try move_account...there may be some *locked* pages. */ + if (mem->res.usage) + goto move_account; + ret = 0; + goto out; } static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) @@ -1023,11 +1175,6 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) return 0; } -static int mem_force_empty_write(struct cgroup *cont, unsigned int event) -{ - return mem_cgroup_force_empty(mem_cgroup_from_cont(cont)); -} - static const struct mem_cgroup_stat_desc { const char *msg; u64 unit; @@ -1103,10 +1250,6 @@ static struct cftype mem_cgroup_files[] = { .trigger = mem_cgroup_reset, .read_u64 = mem_cgroup_read, }, - { - .name = "force_empty", - .trigger = mem_force_empty_write, - }, { .name = "stat", .read_map = mem_control_stat_show, -- cgit v1.2.2 From c8dad2bb6307f5b00f804a686917105206a4d5c9 Mon Sep 17 00:00:00 2001 From: Jan Blunck Date: Wed, 7 Jan 2009 18:07:53 -0800 Subject: memcg: reduce size of mem_cgroup by using nr_cpu_ids As Jan Blunck pointed out, allocating per-cpu stat for memcg to the size of NR_CPUS is not good. This patch changes mem_cgroup's cpustat allocation not based on NR_CPUS but based on nr_cpu_ids. Reviewed-by: Li Zefan Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 49234d93988a..e00f25e6545f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -60,7 +60,7 @@ struct mem_cgroup_stat_cpu { } ____cacheline_aligned_in_smp; struct mem_cgroup_stat { - struct mem_cgroup_stat_cpu cpustat[NR_CPUS]; + struct mem_cgroup_stat_cpu cpustat[0]; }; /* @@ -129,11 +129,10 @@ struct mem_cgroup { int prev_priority; /* for recording reclaim priority */ /* - * statistics. + * statistics. This must be placed at the end of memcg. 
*/ struct mem_cgroup_stat stat; }; -static struct mem_cgroup init_mem_cgroup; enum charge_type { MEM_CGROUP_CHARGE_TYPE_CACHE = 0, @@ -1293,23 +1292,30 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) kfree(mem->info.nodeinfo[node]); } +static int mem_cgroup_size(void) +{ + int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu); + return sizeof(struct mem_cgroup) + cpustat_size; +} + static struct mem_cgroup *mem_cgroup_alloc(void) { struct mem_cgroup *mem; + int size = mem_cgroup_size(); - if (sizeof(*mem) < PAGE_SIZE) - mem = kmalloc(sizeof(*mem), GFP_KERNEL); + if (size < PAGE_SIZE) + mem = kmalloc(size, GFP_KERNEL); else - mem = vmalloc(sizeof(*mem)); + mem = vmalloc(size); if (mem) - memset(mem, 0, sizeof(*mem)); + memset(mem, 0, size); return mem; } static void mem_cgroup_free(struct mem_cgroup *mem) { - if (sizeof(*mem) < PAGE_SIZE) + if (mem_cgroup_size() < PAGE_SIZE) kfree(mem); else vfree(mem); @@ -1322,13 +1328,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) struct mem_cgroup *mem; int node; - if (unlikely((cont->parent) == NULL)) { - mem = &init_mem_cgroup; - } else { - mem = mem_cgroup_alloc(); - if (!mem) - return ERR_PTR(-ENOMEM); - } + mem = mem_cgroup_alloc(); + if (!mem) + return ERR_PTR(-ENOMEM); res_counter_init(&mem->res); @@ -1340,8 +1342,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) free_out: for_each_node_state(node, N_POSSIBLE) free_mem_cgroup_per_zone_info(mem, node); - if (cont->parent != NULL) - mem_cgroup_free(mem); + mem_cgroup_free(mem); return ERR_PTR(-ENOMEM); } -- cgit v1.2.2 From c1e862c1f5ad34771b6d0a528cf681e0dcad7c86 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:55 -0800 Subject: memcg: new force_empty to free pages under group By memcg-move-all-accounts-to-parent-at-rmdir.patch, there is no leak of memory usage and force_empty is removed. This patch adds "force_empty" again, in reasonable manner. memory.force_empty file works when #echo 0 (or some) > memory.force_empty and have following function. 1. only works when there are no task in this cgroup. 2. free all page under this cgroup as much as possible. 3. page which cannot be freed will be moved up to parent. 4. Then, memcg will be empty after above echo returns. This is much better behavior than old "force_empty" which just forget all accounts. This patch also check signal_pending() and above "echo" can be stopped by "Ctrl-C". [akpm@linux-foundation.org: cleanup] Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e00f25e6545f..decace3bb57e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1062,21 +1062,27 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, * make mem_cgroup's charge to be 0 if there is no task. * This enables deleting this mem_cgroup. */ -static int mem_cgroup_force_empty(struct mem_cgroup *mem) +static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) { int ret; int node, zid, shrink; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; + struct cgroup *cgrp = mem->css.cgroup; css_get(&mem->css); shrink = 0; + /* should free all ? 
*/ + if (free_all) + goto try_to_free; move_account: while (mem->res.usage > 0) { ret = -EBUSY; - if (atomic_read(&mem->css.cgroup->count) > 0) + if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) + goto out; + ret = -EINTR; + if (signal_pending(current)) goto out; - /* This is for making all *used* pages to be on LRU. */ lru_add_drain_all(); ret = 0; @@ -1106,19 +1112,29 @@ out: return ret; try_to_free: - /* returns EBUSY if we come here twice. */ - if (shrink) { + /* returns EBUSY if there is a task or if we come here twice. */ + if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { ret = -EBUSY; goto out; } + /* we call try-to-free pages for make this cgroup empty */ + lru_add_drain_all(); /* try to free all pages in this cgroup */ shrink = 1; while (nr_retries && mem->res.usage > 0) { int progress; + + if (signal_pending(current)) { + ret = -EINTR; + goto out; + } progress = try_to_free_mem_cgroup_pages(mem, GFP_HIGHUSER_MOVABLE); - if (!progress) + if (!progress) { nr_retries--; + /* maybe some writeback is necessary */ + congestion_wait(WRITE, HZ/10); + } } /* try move_account...there may be some *locked* pages. */ @@ -1128,6 +1144,12 @@ try_to_free: goto out; } +int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) +{ + return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); +} + + static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) { return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, @@ -1225,6 +1247,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, return 0; } + static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", @@ -1253,6 +1276,10 @@ static struct cftype mem_cgroup_files[] = { .name = "stat", .read_map = mem_control_stat_show, }, + { + .name = "force_empty", + .trigger = mem_cgroup_force_empty_write, + }, }; static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) @@ -1350,7 +1377,7 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - mem_cgroup_force_empty(mem); + mem_cgroup_force_empty(mem, false); } static void mem_cgroup_destroy(struct cgroup_subsys *ss, -- cgit v1.2.2 From d13d144309d2e5a3e6ad978b16c1d0226ddc9231 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:56 -0800 Subject: memcg: handle swap caches SwapCache support for memory resource controller (memcg) Before mem+swap controller, memcg itself should handle SwapCache in proper way. This is cut-out from it. In current memcg, SwapCache is just leaked and the user can create tons of SwapCache. This is a leak of account and should be handled. SwapCache accounting is done as following. charge (anon) - charged when it's mapped. (because of readahead, charge at add_to_swap_cache() is not sane) uncharge (anon) - uncharged when it's dropped from swapcache and fully unmapped. means it's not uncharged at unmap. Note: delete from swap cache at swap-in is done after rmap information is established. charge (shmem) - charged at swap-in. this prevents charge at add_to_page_cache(). uncharge (shmem) - uncharged when it's dropped from swapcache and not on shmem's radix-tree. at migration, check against 'old page' is modified to handle shmem. Comparing to the old version discussed (and caused troubles), we have advantages of - PCG_USED bit. - simple migrating handling. So, situation is much easier than several months ago, maybe. 
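The charge/uncharge rules described above condense to a small predicate. Here is a hedged pure-function model of the uncharge decision that the memcontrol.c hunk further down encodes; the struct fields stand in for PageAnon(), page_mapped(), page_is_file_cache() and page->mapping:

    #include <stdbool.h>
    #include <stdio.h>

    enum charge_type { CHARGE_MAPPED, CHARGE_SWAPOUT };

    struct page_state {
        bool anon;          /* PageAnon()              */
        bool mapped;        /* page_mapped()           */
        bool file_cache;    /* page_is_file_cache()    */
        bool has_mapping;   /* page->mapping != NULL   */
    };

    static bool may_uncharge(enum charge_type ctype,
                             const struct page_state *p)
    {
        switch (ctype) {
        case CHARGE_MAPPED:
            return !p->mapped;      /* last mapper went away */
        case CHARGE_SWAPOUT:
            if (!p->anon)           /* shmem-backed swapcache: only
                                     * once off shmem's radix tree */
                return !(p->has_mapping && !p->file_cache);
            return !p->mapped;      /* anon: only once fully unmapped */
        }
        return true;
    }

    int main(void)
    {
        struct page_state anon_mapped = { true, true,  false, true  };
        struct page_state anon_gone   = { true, false, false, false };

        printf("swapout, anon still mapped: %d\n",
               may_uncharge(CHARGE_SWAPOUT, &anon_mapped)); /* 0: keep */
        printf("swapout, anon unmapped:     %d\n",
               may_uncharge(CHARGE_SWAPOUT, &anon_gone));   /* 1: drop */
        return 0;
    }

In other words, an anon page keeps its charge across swap-out until the last pte is gone, which is precisely how the account leak of tons of SwapCache is closed.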
[hugh@veritas.com: memcg: handle swap caches build fix] Reviewed-by: Daisuke Nishimura Tested-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Hugh Dickins Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++------ mm/shmem.c | 18 ++++++++++++++-- mm/swap_state.c | 1 + 3 files changed, 78 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index decace3bb57e..7288e9d85ca7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -139,6 +140,7 @@ enum charge_type { MEM_CGROUP_CHARGE_TYPE_MAPPED, MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ + MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ NR_CHARGE_TYPE, }; @@ -780,6 +782,33 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); } +#ifdef CONFIG_SWAP +int mem_cgroup_cache_charge_swapin(struct page *page, + struct mm_struct *mm, gfp_t mask, bool locked) +{ + int ret = 0; + + if (mem_cgroup_subsys.disabled) + return 0; + if (unlikely(!mm)) + mm = &init_mm; + if (!locked) + lock_page(page); + /* + * If not locked, the page can be dropped from SwapCache until + * we reach here. + */ + if (PageSwapCache(page)) { + ret = mem_cgroup_charge_common(page, mm, mask, + MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); + } + if (!locked) + unlock_page(page); + + return ret; +} +#endif + void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) { struct page_cgroup *pc; @@ -817,6 +846,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) if (mem_cgroup_subsys.disabled) return; + if (PageSwapCache(page)) + return; + /* * Check if our page_cgroup is valid */ @@ -825,12 +857,26 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) return; lock_page_cgroup(pc); - if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page)) - || !PageCgroupUsed(pc)) { - /* This happens at race in zap_pte_range() and do_swap_page()*/ - unlock_page_cgroup(pc); - return; + + if (!PageCgroupUsed(pc)) + goto unlock_out; + + switch (ctype) { + case MEM_CGROUP_CHARGE_TYPE_MAPPED: + if (page_mapped(page)) + goto unlock_out; + break; + case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: + if (!PageAnon(page)) { /* Shared memory */ + if (page->mapping && !page_is_file_cache(page)) + goto unlock_out; + } else if (page_mapped(page)) /* Anon */ + goto unlock_out; + break; + default: + break; } + ClearPageCgroupUsed(pc); mem = pc->mem_cgroup; @@ -844,6 +890,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) css_put(&mem->css); return; + +unlock_out: + unlock_page_cgroup(pc); + return; } void mem_cgroup_uncharge_page(struct page *page) @@ -863,6 +913,11 @@ void mem_cgroup_uncharge_cache_page(struct page *page) __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); } +void mem_cgroup_uncharge_swapcache(struct page *page) +{ + __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT); +} + /* * Before starting migration, account PAGE_SIZE to mem_cgroup that the old * page belongs to. @@ -920,7 +975,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; /* unused page is not on radix-tree now. 
*/ - if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED) + if (unused) __mem_cgroup_uncharge_common(unused, ctype); pc = lookup_page_cgroup(target); diff --git a/mm/shmem.c b/mm/shmem.c index bd9b4ea307b2..adf5c3eedbc9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -928,8 +928,12 @@ found: error = 1; if (!inode) goto out; - /* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */ - error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE); + /* + * Charge page using GFP_HIGHUSER_MOVABLE while we can wait. + * charged back to the user(not to caller) when swap account is used. + */ + error = mem_cgroup_cache_charge_swapin(page, + current->mm, GFP_HIGHUSER_MOVABLE, true); if (error) goto out; error = radix_tree_preload(GFP_KERNEL); @@ -1266,6 +1270,16 @@ repeat: goto repeat; } wait_on_page_locked(swappage); + /* + * We want to avoid charge at add_to_page_cache(). + * charge against this swap cache here. + */ + if (mem_cgroup_cache_charge_swapin(swappage, + current->mm, gfp, false)) { + page_cache_release(swappage); + error = -ENOMEM; + goto failed; + } page_cache_release(swappage); goto repeat; } diff --git a/mm/swap_state.c b/mm/swap_state.c index 81c825f67a7f..09291ca11f5f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -118,6 +118,7 @@ void __delete_from_swap_cache(struct page *page) total_swapcache_pages--; __dec_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(del_total); + mem_cgroup_uncharge_swapcache(page); } /** -- cgit v1.2.2 From c077719be8e9e6b55702117513d1b5f41d80404a Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:57 -0800 Subject: memcg: mem+swap controller Kconfig Config and control variable for mem+swap controller. This patch adds CONFIG_CGROUP_MEM_RES_CTLR_SWAP (memory resource controller swap extension.) For accounting swap, it's obvious that we have to use additional memory to remember "who uses swap". This adds more overhead. So, it's better to offer "choice" to users. This patch adds 2 choices. This patch adds 2 parameters to enable swap extension or not. - CONFIG - boot option Reviewed-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7288e9d85ca7..59dd8c116372 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -41,6 +41,15 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly; #define MEM_CGROUP_RECLAIM_RETRIES 5 +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */ +int do_swap_account __read_mostly; +static int really_do_swap_account __initdata = 1; /* for remember boot option*/ +#else +#define do_swap_account (0) +#endif + + /* * Statistics for memory cgroup. 
*/ @@ -1404,6 +1413,18 @@ static void mem_cgroup_free(struct mem_cgroup *mem) } +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +static void __init enable_swap_cgroup(void) +{ + if (!mem_cgroup_subsys.disabled && really_do_swap_account) + do_swap_account = 1; +} +#else +static void __init enable_swap_cgroup(void) +{ +} +#endif + static struct cgroup_subsys_state * mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) { @@ -1419,6 +1440,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) for_each_node_state(node, N_POSSIBLE) if (alloc_mem_cgroup_per_zone_info(mem, node)) goto free_out; + /* root ? */ + if (cont->parent == NULL) + enable_swap_cgroup(); return &mem->css; free_out: @@ -1490,3 +1514,13 @@ struct cgroup_subsys mem_cgroup_subsys = { .attach = mem_cgroup_move_task, .early_init = 0, }; + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP + +static int __init disable_swap_account(char *s) +{ + really_do_swap_account = 0; + return 1; +} +__setup("noswapaccount", disable_swap_account); +#endif -- cgit v1.2.2 From 27a7faa0779dd13729196c1a818c294f44bbd1ee Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:07:58 -0800 Subject: memcg: swap cgroup for remembering usage For accounting swap, we need a record per swap entry, at least. This patch adds the following functions. - swap_cgroup_swapon() .... called from swapon - swap_cgroup_swapoff() ... called at the end of swapoff. - swap_cgroup_record() .... record information of swap entry. - swap_cgroup_lookup() .... lookup information of swap entry. This patch just implements "how to record information". No actual method to limit the usage of swap is included. These routines use a flat table to record and look up. A "wise" lookup system like a radix-tree requires memory allocation at new records, but swap-out is usually called under memory shortage (or when memcg hits its limit.) So, I used static allocation. (Maybe dynamic allocation is not very hard, but it adds an additional memory allocation in the memory shortage path.) Note1: here we use a pointer to record information, and this means 8 bytes per swap entry. I think we can reduce this when we create an "id of cgroup" in the range of 0-65535 or 0-255. Reported-by: Daisuke Nishimura Reviewed-by: Daisuke Nishimura Tested-by: Daisuke Nishimura Reported-by: Hugh Dickins Reported-by: Balbir Singh Reported-by: Andrew Morton Signed-off-by: KAMEZAWA Hiroyuki Cc: Pavel Emelianov Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_cgroup.c | 197 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ mm/swapfile.c | 10 +++ 2 files changed, 207 insertions(+) (limited to 'mm') diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index df1e54a5ed19..685e7c8e1fd6 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -8,6 +8,7 @@ #include #include #include +#include static void __meminit __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) @@ -270,3 +271,199 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) } #endif + + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP + +static DEFINE_MUTEX(swap_cgroup_mutex); struct swap_cgroup_ctrl { + struct page **map; + unsigned long length; +}; + +struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; + +/* + * These 8 bytes seem big..maybe we can reduce this when we can use an "id" for + * cgroup rather than a pointer.
+ */ +struct swap_cgroup { + struct mem_cgroup *val; +}; +#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) +#define SC_POS_MASK (SC_PER_PAGE - 1) + +/* + * SwapCgroup implements "lookup" and "exchange" operations. + * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge + * against SwapCache. At swap_free(), this is accessed directly from swap. + * + * This means, + * - we have no race in "exchange" when we're accessed via SwapCache because + * SwapCache(and its swp_entry) is under lock. + * - When called via swap_free(), there is no user of this entry and no race. + * Then, we don't need lock around "exchange". + * + * TODO: we can push these buffers out to HIGHMEM. + */ + +/* + * allocate buffer for swap_cgroup. + */ +static int swap_cgroup_prepare(int type) +{ + struct page *page; + struct swap_cgroup_ctrl *ctrl; + unsigned long idx, max; + + if (!do_swap_account) + return 0; + ctrl = &swap_cgroup_ctrl[type]; + + for (idx = 0; idx < ctrl->length; idx++) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto not_enough_page; + ctrl->map[idx] = page; + } + return 0; +not_enough_page: + max = idx; + for (idx = 0; idx < max; idx++) + __free_page(ctrl->map[idx]); + + return -ENOMEM; +} + +/** + * swap_cgroup_record - record mem_cgroup for this swp_entry. + * @ent: swap entry to be recorded into + * @mem: mem_cgroup to be recorded + * + * Returns old value at success, NULL at failure. + * (Of course, old value can be NULL.) + */ +struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) +{ + int type = swp_type(ent); + unsigned long offset = swp_offset(ent); + unsigned long idx = offset / SC_PER_PAGE; + unsigned long pos = offset & SC_POS_MASK; + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + struct mem_cgroup *old; + + if (!do_swap_account) + return NULL; + + ctrl = &swap_cgroup_ctrl[type]; + + mappage = ctrl->map[idx]; + sc = page_address(mappage); + sc += pos; + old = sc->val; + sc->val = mem; + + return old; +} + +/** + * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry + * @ent: swap entry to be looked up. + * + * Returns pointer to mem_cgroup at success. NULL at failure. 
*/ +struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) +{ + int type = swp_type(ent); + unsigned long offset = swp_offset(ent); + unsigned long idx = offset / SC_PER_PAGE; + unsigned long pos = offset & SC_POS_MASK; + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + struct mem_cgroup *ret; + + if (!do_swap_account) + return NULL; + + ctrl = &swap_cgroup_ctrl[type]; + mappage = ctrl->map[idx]; + sc = page_address(mappage); + sc += pos; + ret = sc->val; + return ret; +} + +int swap_cgroup_swapon(int type, unsigned long max_pages) +{ + void *array; + unsigned long array_size; + unsigned long length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return 0; + + length = ((max_pages/SC_PER_PAGE) + 1); + array_size = length * sizeof(void *); + + array = vmalloc(array_size); + if (!array) + goto nomem; + + memset(array, 0, array_size); + ctrl = &swap_cgroup_ctrl[type]; + mutex_lock(&swap_cgroup_mutex); + ctrl->length = length; + ctrl->map = array; + if (swap_cgroup_prepare(type)) { + /* memory shortage */ + ctrl->map = NULL; + ctrl->length = 0; + vfree(array); + mutex_unlock(&swap_cgroup_mutex); + goto nomem; + } + mutex_unlock(&swap_cgroup_mutex); + + printk(KERN_INFO + "swap_cgroup: uses %ld bytes of vmalloc for pointer array space" + " and %ld bytes to hold mem_cgroup pointers on swap\n", + array_size, length * PAGE_SIZE); + printk(KERN_INFO + "swap_cgroup can be disabled by noswapaccount boot option.\n"); + + return 0; nomem: + printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); + printk(KERN_INFO + "swap_cgroup can be disabled by noswapaccount boot option\n"); + return -ENOMEM; +} + +void swap_cgroup_swapoff(int type) +{ + int i; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return; + + mutex_lock(&swap_cgroup_mutex); + ctrl = &swap_cgroup_ctrl[type]; + if (ctrl->map) { + for (i = 0; i < ctrl->length; i++) { + struct page *page = ctrl->map[i]; + if (page) + __free_page(page); + } + vfree(ctrl->map); + ctrl->map = NULL; + ctrl->length = 0; + } + mutex_unlock(&swap_cgroup_mutex); +} + +#endif diff --git a/mm/swapfile.c b/mm/swapfile.c index ddc6d92be2cb..1e7a715a3866 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -33,6 +33,7 @@ #include #include #include +#include static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -1494,6 +1495,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile) spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); vfree(swap_map); + /* Destroy swap account information */ + swap_cgroup_swapoff(type); + inode = mapping->host; if (S_ISBLK(inode->i_mode)) { struct block_device *bdev = I_BDEV(inode); @@ -1811,6 +1815,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) } swap_map[page_nr] = SWAP_MAP_BAD; } + + error = swap_cgroup_swapon(type, maxpages); + if (error) + goto bad_swap; + nr_good_pages = swap_header->info.last_page - swap_header->info.nr_badpages - 1 /* header page */; @@ -1882,6 +1891,7 @@ bad_swap: bd_release(bdev); } destroy_swap_extents(p); + swap_cgroup_swapoff(type); bad_swap_2: spin_lock(&swap_lock); p->swap_file = NULL; -- cgit v1.2.2 From 8c7c6e34a1256a5082d38c8e9bd1474476912715 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:00 -0800 Subject: memcg: mem+swap controller core This patch implements a per-cgroup limit for usage of memory+swap. However, where there is SwapCache, double counting of swap-cache and swap-entry is avoided. The mem+swap controller works as follows.
- memory usage is limited by memory.limit_in_bytes. - memory + swap usage is limited by memory.memsw.limit_in_bytes. This has the following benefits. - A user can limit total resource usage of mem+swap. Without this, because the memory resource controller doesn't take care of usage of swap, a process can exhaust all the swap (by a memory leak.) We can avoid this case. And swap is a shared resource, but it cannot be reclaimed (go back to memory) until it's used. This characteristic can be a problem when the memory is divided into some parts by cpuset or memcg. Assume group A and group B. After some applications execute, the system can end up as: Group A -- very large free memory space but occupies 99% of swap. Group B -- under memory shortage but cannot use swap...it's nearly full. The ability to set an appropriate swap limit for each group is required. Maybe someone wonders "why not swap but mem+swap ?" - The global LRU (kswapd) can swap out arbitrary pages. Swap-out means moving account from memory to swap...there is no change in usage of mem+swap. In other words, when we want to limit the usage of swap without affecting the global LRU, the mem+swap limit is better than just limiting swap. Accounting target information is stored in swap_cgroup, which is a per swap entry record. Charge is done as follows. map - charge page and memsw. unmap - uncharge page/memsw if not SwapCache. swap-out (__delete_from_swap_cache) - uncharge page - record mem_cgroup information to swap_cgroup. swap-in (do_swap_page) - charged as page and memsw. The record in swap_cgroup is cleared. memsw accounting is decremented. swap-free (swap_free()) - if a swap entry is freed, memsw is uncharged by PAGE_SIZE. There are people who work in never-swap environments and consider swap something bad. For such people, this mem+swap controller extension is just an overhead. This overhead is avoided by a config or boot option. (see Kconfig. detail is not in this patch.) TODO: - maybe more optimization can be done in the swap-in path. (but not very safe.) But we just do simple accounting at this stage. [nishimura@mxp.nes.nec.co.jp: make resize limit hold mutex] [hugh@veritas.com: memswap controller core swapcache fixes] Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Daisuke Nishimura Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 400 ++++++++++++++++++++++++++++++++++++++++++++++++++------ mm/memory.c | 18 ++- mm/swap_state.c | 5 +- mm/swapfile.c | 11 +- mm/vmscan.c | 6 +- 5 files changed, 393 insertions(+), 47 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 59dd8c116372..2efcf38f3b73 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -131,6 +132,10 @@ struct mem_cgroup { * the counter to account for memory usage */ struct res_counter res; + /* + * the counter to account for mem+swap usage. + */ + struct res_counter memsw; /* * Per cgroup active and inactive list, similar to the * per zone LRU lists. */ struct mem_cgroup_lru_info info; int prev_priority; /* for recording reclaim priority */ + int obsolete; + atomic_t refcnt; /* * statistics. This must be placed at the end of memcg.
*/ @@ -167,6 +174,17 @@ pcg_default_flags[NR_CHARGE_TYPE] = { 0, /* FORCE */ }; + +/* for encoding cft->private value on file */ +#define _MEM (0) +#define _MEMSWAP (1) +#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) +#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) +#define MEMFILE_ATTR(val) ((val) & 0xffff) + +static void mem_cgroup_get(struct mem_cgroup *mem); +static void mem_cgroup_put(struct mem_cgroup *mem); + /* * Always modified under lru lock. Then, not necessary to preempt_disable() */ @@ -485,7 +503,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, * oom-killer can be invoked. */ static int __mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) + gfp_t gfp_mask, struct mem_cgroup **memcg, + bool oom) { struct mem_cgroup *mem; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; @@ -513,12 +532,25 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, css_get(&mem->css); } + while (1) { + int ret; + bool noswap = false; - while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) { + ret = res_counter_charge(&mem->res, PAGE_SIZE); + if (likely(!ret)) { + if (!do_swap_account) + break; + ret = res_counter_charge(&mem->memsw, PAGE_SIZE); + if (likely(!ret)) + break; + /* mem+swap counter fails */ + res_counter_uncharge(&mem->res, PAGE_SIZE); + noswap = true; + } if (!(gfp_mask & __GFP_WAIT)) goto nomem; - if (try_to_free_mem_cgroup_pages(mem, gfp_mask)) + if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap)) continue; /* @@ -527,8 +559,13 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, * moved to swap cache or just unmapped from the cgroup. * Check the limit again to see if the reclaim reduced the * current usage of the cgroup before giving up + * */ - if (res_counter_check_under_limit(&mem->res)) + if (!do_swap_account && + res_counter_check_under_limit(&mem->res)) + continue; + if (do_swap_account && + res_counter_check_under_limit(&mem->memsw)) continue; if (!nr_retries--) { @@ -582,6 +619,8 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, if (unlikely(PageCgroupUsed(pc))) { unlock_page_cgroup(pc); res_counter_uncharge(&mem->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&mem->memsw, PAGE_SIZE); css_put(&mem->css); return; } @@ -646,6 +685,8 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, __mem_cgroup_remove_list(from_mz, pc); css_put(&from->css); res_counter_uncharge(&from->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&from->memsw, PAGE_SIZE); pc->mem_cgroup = to; css_get(&to->css); __mem_cgroup_add_list(to_mz, pc, false); @@ -692,8 +733,11 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, /* drop extra refcnt */ css_put(&parent->css); /* uncharge if move fails */ - if (ret) + if (ret) { res_counter_uncharge(&parent->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&parent->memsw, PAGE_SIZE); + } return ret; } @@ -791,7 +835,42 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); } +int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, + gfp_t mask, struct mem_cgroup **ptr) +{ + struct mem_cgroup *mem; + swp_entry_t ent; + + if (mem_cgroup_subsys.disabled) + return 0; + + if (!do_swap_account) + goto charge_cur_mm; + + /* + * A racing thread's fault, or swapoff, may have already updated + * the pte, and even removed page from swap cache: return success + * to go on to do_swap_page()'s pte_same() test, which should fail. 
+ */ + if (!PageSwapCache(page)) + return 0; + + ent.val = page_private(page); + + mem = lookup_swap_cgroup(ent); + if (!mem || mem->obsolete) + goto charge_cur_mm; + *ptr = mem; + return __mem_cgroup_try_charge(NULL, mask, ptr, true); +charge_cur_mm: + if (unlikely(!mm)) + mm = &init_mm; + return __mem_cgroup_try_charge(mm, mask, ptr, true); +} + #ifdef CONFIG_SWAP + int mem_cgroup_cache_charge_swapin(struct page *page, struct mm_struct *mm, gfp_t mask, bool locked) { @@ -808,8 +887,28 @@ int mem_cgroup_cache_charge_swapin(struct page *page, * we reach here. */ if (PageSwapCache(page)) { + struct mem_cgroup *mem = NULL; + swp_entry_t ent; + + ent.val = page_private(page); + if (do_swap_account) { + mem = lookup_swap_cgroup(ent); + if (mem && mem->obsolete) + mem = NULL; + if (mem) + mm = NULL; + } ret = mem_cgroup_charge_common(page, mm, mask, - MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); + MEM_CGROUP_CHARGE_TYPE_SHMEM, mem); + + if (!ret && do_swap_account) { + /* avoid double counting */ + mem = swap_cgroup_record(ent, NULL); + if (mem) { + res_counter_uncharge(&mem->memsw, PAGE_SIZE); + mem_cgroup_put(mem); + } + } } if (!locked) unlock_page(page); @@ -828,6 +927,23 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) return; pc = lookup_page_cgroup(page); __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED); + /* + * Now swap is on-memory. This means this page may be + * counted both as mem and swap....double count. + * Fix it by uncharging from memsw. This SwapCache is stable + * because we're still under lock_page(). + */ + if (do_swap_account) { + swp_entry_t ent = {.val = page_private(page)}; + struct mem_cgroup *memcg; + memcg = swap_cgroup_record(ent, NULL); + if (memcg) { + /* If memcg is obsolete, memcg can be != ptr */ + res_counter_uncharge(&memcg->memsw, PAGE_SIZE); + mem_cgroup_put(memcg); + } + + } } void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) @@ -837,6 +953,8 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) if (!mem) return; res_counter_uncharge(&mem->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&mem->memsw, PAGE_SIZE); css_put(&mem->css); } @@ -844,29 +962,31 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) /* * uncharge if !page_mapped(page) */ -static void +static struct mem_cgroup * __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) { struct page_cgroup *pc; - struct mem_cgroup *mem; + struct mem_cgroup *mem = NULL; struct mem_cgroup_per_zone *mz; unsigned long flags; if (mem_cgroup_subsys.disabled) - return; + return NULL; if (PageSwapCache(page)) - return; + return NULL; /* * Check if our page_cgroup is valid */ pc = lookup_page_cgroup(page); if (unlikely(!pc || !PageCgroupUsed(pc))) - return; + return NULL; lock_page_cgroup(pc); + mem = pc->mem_cgroup; + if (!PageCgroupUsed(pc)) goto unlock_out; @@ -886,8 +1006,11 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) break; } + res_counter_uncharge(&mem->res, PAGE_SIZE); + if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) + res_counter_uncharge(&mem->memsw, PAGE_SIZE); + ClearPageCgroupUsed(pc); - mem = pc->mem_cgroup; mz = page_cgroup_zoneinfo(pc); spin_lock_irqsave(&mz->lru_lock, flags); @@ -895,14 +1018,13 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); - res_counter_uncharge(&mem->res, PAGE_SIZE); css_put(&mem->css); - return; + return mem; unlock_out: 
unlock_page_cgroup(pc); - return; + return NULL; } void mem_cgroup_uncharge_page(struct page *page) @@ -922,10 +1044,42 @@ void mem_cgroup_uncharge_cache_page(struct page *page) __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); } -void mem_cgroup_uncharge_swapcache(struct page *page) +/* + * called from __delete_from_swap_cache() and drop "page" account. + * memcg information is recorded to swap_cgroup of "ent" + */ +void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +{ + struct mem_cgroup *memcg; + + memcg = __mem_cgroup_uncharge_common(page, + MEM_CGROUP_CHARGE_TYPE_SWAPOUT); + /* record memcg information */ + if (do_swap_account && memcg) { + swap_cgroup_record(ent, memcg); + mem_cgroup_get(memcg); + } +} + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +/* + * called from swap_entry_free(). remove record in swap_cgroup and + * uncharge "memsw" account. + */ +void mem_cgroup_uncharge_swap(swp_entry_t ent) { - __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT); + struct mem_cgroup *memcg; + + if (!do_swap_account) + return; + + memcg = swap_cgroup_record(ent, NULL); + if (memcg) { + res_counter_uncharge(&memcg->memsw, PAGE_SIZE); + mem_cgroup_put(memcg); + } } +#endif /* * Before starting migration, account PAGE_SIZE to mem_cgroup that the old @@ -1034,7 +1188,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) rcu_read_unlock(); do { - progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); + progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true); progress += res_counter_check_under_limit(&mem->res); } while (!progress && --retry); @@ -1044,26 +1198,84 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } +static DEFINE_MUTEX(set_limit_mutex); + static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long long val) { int retry_count = MEM_CGROUP_RECLAIM_RETRIES; int progress; + u64 memswlimit; int ret = 0; - while (res_counter_set_limit(&memcg->res, val)) { + while (retry_count) { if (signal_pending(current)) { ret = -EINTR; break; } - if (!retry_count) { - ret = -EBUSY; + /* + * Rather than hide all in some function, I do this in + * open coded manner. You see what this really does. + * We have to guarantee mem->res.limit < mem->memsw.limit. + */ + mutex_lock(&set_limit_mutex); + memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + if (memswlimit < val) { + ret = -EINVAL; + mutex_unlock(&set_limit_mutex); break; } + ret = res_counter_set_limit(&memcg->res, val); + mutex_unlock(&set_limit_mutex); + + if (!ret) + break; + progress = try_to_free_mem_cgroup_pages(memcg, - GFP_HIGHUSER_MOVABLE); - if (!progress) + GFP_HIGHUSER_MOVABLE, false); + if (!progress) retry_count--; + } + return ret; +} + +int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, + unsigned long long val) +{ + int retry_count = MEM_CGROUP_RECLAIM_RETRIES; + u64 memlimit, oldusage, curusage; + int ret; + + if (!do_swap_account) + return -EINVAL; + + while (retry_count) { + if (signal_pending(current)) { + ret = -EINTR; + break; + } + /* + * Rather than hide all in some function, I do this in + * open coded manner. You see what this really does. + * We have to guarantee mem->res.limit < mem->memsw.limit. 
+ */ + mutex_lock(&set_limit_mutex); + memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); + if (memlimit > val) { + ret = -EINVAL; + mutex_unlock(&set_limit_mutex); + break; + } + ret = res_counter_set_limit(&memcg->memsw, val); + mutex_unlock(&set_limit_mutex); + + if (!ret) + break; + + oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); + try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true); + curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); + if (curusage >= oldusage) retry_count--; } return ret; @@ -1193,7 +1405,7 @@ try_to_free: goto out; } progress = try_to_free_mem_cgroup_pages(mem, - GFP_HIGHUSER_MOVABLE); + GFP_HIGHUSER_MOVABLE, false); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -1216,8 +1428,25 @@ int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) { - return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, - cft->private); + struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + u64 val = 0; + int type, name; + + type = MEMFILE_TYPE(cft->private); + name = MEMFILE_ATTR(cft->private); + switch (type) { + case _MEM: + val = res_counter_read_u64(&mem->res, name); + break; + case _MEMSWAP: + if (do_swap_account) + val = res_counter_read_u64(&mem->memsw, name); + break; + default: + BUG(); + break; + } + return val; } /* * The user of this function is... @@ -1227,15 +1456,22 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, const char *buffer) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); + int type, name; unsigned long long val; int ret; - switch (cft->private) { + type = MEMFILE_TYPE(cft->private); + name = MEMFILE_ATTR(cft->private); + switch (name) { case RES_LIMIT: /* This function does all necessary parse...reuse it */ ret = res_counter_memparse_write_strategy(buffer, &val); - if (!ret) + if (ret) + break; + if (type == _MEM) ret = mem_cgroup_resize_limit(memcg, val); + else + ret = mem_cgroup_resize_memsw_limit(memcg, val); break; default: ret = -EINVAL; /* should be BUG() ? 
*/ @@ -1247,14 +1483,23 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) { struct mem_cgroup *mem; + int type, name; mem = mem_cgroup_from_cont(cont); - switch (event) { + type = MEMFILE_TYPE(event); + name = MEMFILE_ATTR(event); + switch (name) { case RES_MAX_USAGE: - res_counter_reset_max(&mem->res); + if (type == _MEM) + res_counter_reset_max(&mem->res); + else + res_counter_reset_max(&mem->memsw); break; case RES_FAILCNT: - res_counter_reset_failcnt(&mem->res); + if (type == _MEM) + res_counter_reset_failcnt(&mem->res); + else + res_counter_reset_failcnt(&mem->memsw); break; } return 0; @@ -1315,24 +1560,24 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", - .private = RES_USAGE, + .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), .read_u64 = mem_cgroup_read, }, { .name = "max_usage_in_bytes", - .private = RES_MAX_USAGE, + .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), .trigger = mem_cgroup_reset, .read_u64 = mem_cgroup_read, }, { .name = "limit_in_bytes", - .private = RES_LIMIT, + .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), .write_string = mem_cgroup_write, .read_u64 = mem_cgroup_read, }, { .name = "failcnt", - .private = RES_FAILCNT, + .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), .trigger = mem_cgroup_reset, .read_u64 = mem_cgroup_read, }, @@ -1346,6 +1591,47 @@ static struct cftype mem_cgroup_files[] = { }, }; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +static struct cftype memsw_cgroup_files[] = { + { + .name = "memsw.usage_in_bytes", + .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), + .read_u64 = mem_cgroup_read, + }, + { + .name = "memsw.max_usage_in_bytes", + .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), + .trigger = mem_cgroup_reset, + .read_u64 = mem_cgroup_read, + }, + { + .name = "memsw.limit_in_bytes", + .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), + .write_string = mem_cgroup_write, + .read_u64 = mem_cgroup_read, + }, + { + .name = "memsw.failcnt", + .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), + .trigger = mem_cgroup_reset, + .read_u64 = mem_cgroup_read, + }, +}; + +static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) +{ + if (!do_swap_account) + return 0; + return cgroup_add_files(cont, ss, memsw_cgroup_files, + ARRAY_SIZE(memsw_cgroup_files)); +}; +#else +static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) +{ + return 0; } +#endif + static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) { struct mem_cgroup_per_node *pn; @@ -1404,14 +1690,44 @@ static struct mem_cgroup *mem_cgroup_alloc(void) return mem; } +/* + * At destroying mem_cgroup, references from swap_cgroup can remain. + * (scanning all at force_empty is too costly...) + * + * Instead of clearing all references at force_empty, we remember + * the number of references from swap_cgroup and free mem_cgroup when + * it goes down to 0. + * + * When mem_cgroup is destroyed, mem->obsolete will be set to 1 and any + * entry which points to this memcg will be ignored at swapin. + * + * Removal of cgroup itself succeeds regardless of refs from swap.
+ */ + static void mem_cgroup_free(struct mem_cgroup *mem) { + if (atomic_read(&mem->refcnt) > 0) + return; if (mem_cgroup_size() < PAGE_SIZE) kfree(mem); else vfree(mem); } +static void mem_cgroup_get(struct mem_cgroup *mem) +{ + atomic_inc(&mem->refcnt); +} + +static void mem_cgroup_put(struct mem_cgroup *mem) +{ + if (atomic_dec_and_test(&mem->refcnt)) { + if (!mem->obsolete) + return; + mem_cgroup_free(mem); + } +} + #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP static void __init enable_swap_cgroup(void) @@ -1436,6 +1752,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) return ERR_PTR(-ENOMEM); res_counter_init(&mem->res); + res_counter_init(&mem->memsw); for_each_node_state(node, N_POSSIBLE) if (alloc_mem_cgroup_per_zone_info(mem, node)) @@ -1456,6 +1773,7 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + mem->obsolete = 1; mem_cgroup_force_empty(mem, false); } @@ -1474,8 +1792,14 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, static int mem_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) { - return cgroup_add_files(cont, ss, mem_cgroup_files, - ARRAY_SIZE(mem_cgroup_files)); + int ret; + + ret = cgroup_add_files(cont, ss, mem_cgroup_files, + ARRAY_SIZE(mem_cgroup_files)); + + if (!ret) + ret = register_memsw_files(cont, ss); + return ret; } static void mem_cgroup_move_task(struct cgroup_subsys *ss, diff --git a/mm/memory.c b/mm/memory.c index ba5189e322e6..1358012ffa73 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2431,7 +2431,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) { + if (mem_cgroup_try_charge_swapin(mm, page, + GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) { ret = VM_FAULT_OOM; unlock_page(page); goto out; @@ -2449,8 +2450,20 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out_nomap; } - /* The page isn't present yet, go ahead with the fault. */ + /* + * The page isn't present yet, go ahead with the fault. + * + * Be careful about the sequence of operations here. + * To get its accounting right, reuse_swap_page() must be called + * while the page is counted on swap but not yet in mapcount i.e. + * before page_add_anon_rmap() and swap_free(); try_to_free_swap() + * must be called after the swap_free(), or it will never succeed. + * And mem_cgroup_commit_charge_swapin(), which uses the swp_entry + * in page->private, must be called before reuse_swap_page(), + * which may delete_from_swap_cache(). 
+ */ + mem_cgroup_commit_charge_swapin(page, ptr); inc_mm_counter(mm, anon_rss); pte = mk_pte(page, vma->vm_page_prot); if (write_access && reuse_swap_page(page)) { @@ -2461,7 +2474,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, flush_icache_page(vma, page); set_pte_at(mm, address, page_table, pte); page_add_anon_rmap(page, vma, address); - mem_cgroup_commit_charge_swapin(page, ptr); swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) diff --git a/mm/swap_state.c b/mm/swap_state.c index 09291ca11f5f..3ecea98ecb45 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -108,6 +109,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) */ void __delete_from_swap_cache(struct page *page) { + swp_entry_t ent = {.val = page_private(page)}; + VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageSwapCache(page)); VM_BUG_ON(PageWriteback(page)); @@ -118,7 +121,7 @@ void __delete_from_swap_cache(struct page *page) total_swapcache_pages--; __dec_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(del_total); - mem_cgroup_uncharge_swapcache(page); + mem_cgroup_uncharge_swapcache(page, ent); } /** diff --git a/mm/swapfile.c b/mm/swapfile.c index 1e7a715a3866..0579d9069b61 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -471,8 +471,9 @@ out: return NULL; } -static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) +static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent) { + unsigned long offset = swp_offset(ent); int count = p->swap_map[offset]; if (count < SWAP_MAP_MAX) { @@ -487,6 +488,7 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) swap_list.next = p - swap_info; nr_swap_pages++; p->inuse_pages--; + mem_cgroup_uncharge_swap(ent); } } return count; @@ -502,7 +504,7 @@ void swap_free(swp_entry_t entry) p = swap_info_get(entry); if (p) { - swap_entry_free(p, swp_offset(entry)); + swap_entry_free(p, entry); spin_unlock(&swap_lock); } } @@ -582,7 +584,7 @@ int free_swap_and_cache(swp_entry_t entry) p = swap_info_get(entry); if (p) { - if (swap_entry_free(p, swp_offset(entry)) == 1) { + if (swap_entry_free(p, entry) == 1) { page = find_get_page(&swapper_space, entry.val); if (page && !trylock_page(page)) { page_cache_release(page); @@ -696,7 +698,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, pte_t *pte; int ret = 1; - if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr)) + if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, + GFP_HIGHUSER_MOVABLE, &ptr)) ret = -ENOMEM; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); diff --git a/mm/vmscan.c b/mm/vmscan.c index b07c48b09a93..f63b20dd7714 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1661,7 +1661,8 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, - gfp_t gfp_mask) + gfp_t gfp_mask, + bool noswap) { struct scan_control sc = { .may_writepage = !laptop_mode, @@ -1674,6 +1675,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, }; struct zonelist *zonelist; + if (noswap) + sc.may_swap = 0; + sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); zonelist = NODE_DATA(numa_node_id())->node_zonelists; -- cgit v1.2.2 From 08e552c69c6930d64722de3ec18c51844d06ee28 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 
7 Jan 2009 18:08:01 -0800 Subject: memcg: synchronized LRU A big patch for changing memcg's LRU semantics. Now, - page_cgroup is linked to mem_cgroup's own LRU (per zone). - The LRU of page_cgroup is not synchronized with the global LRU. - page and page_cgroup are one-to-one and statically allocated. - To find what LRU a page_cgroup is on, you have to check pc->mem_cgroup, as in - lru = page_cgroup_zoneinfo(pc, nid_of_pc, zid_of_pc); - SwapCache is handled. And, when we handle the LRU list of page_cgroup, we do the following. pc = lookup_page_cgroup(page); lock_page_cgroup(pc); .....................(1) mz = page_cgroup_zoneinfo(pc); spin_lock(&mz->lru_lock); .....add to LRU spin_unlock(&mz->lru_lock); unlock_page_cgroup(pc); But (1) is a spinlock and we have to be afraid of deadlock with zone->lru_lock. So, trylock() is used at (1), now. Without (1), we can't trust "mz" is correct. This is a trial to remove this dirty nesting of locks. This patch changes mz->lru_lock to be zone->lru_lock. Then, the above sequence will be written as spin_lock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU mem_cgroup_add/remove/etc_lru() { pc = lookup_page_cgroup(page); mz = page_cgroup_zoneinfo(pc); if (PageCgroupUsed(pc)) { ....add to LRU } } spin_unlock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU This is much simpler. (*) We're safe even if we don't take lock_page_cgroup(pc). Because.. 1. pc->mem_cgroup can be modified only - at charge. - at account_move(). 2. at charge the PCG_USED bit is not set before pc->mem_cgroup is fixed. 3. at account_move() the page is isolated and not on LRU. Pros. - easy for maintenance. - memcg can make use of laziness of pagevec. - we don't have to duplicate LRU/Active/Unevictable bits in page_cgroup. - LRU status of memcg will be synchronized with the global LRU's. - # of locks is reduced. - account_move() is simplified very much. Cons. - may increase cost of LRU rotation. (no impact if memcg is not configured.) Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 323 ++++++++++++++++++++++++------------------------------- mm/page_cgroup.c | 1 + mm/swap.c | 1 - mm/vmscan.c | 9 +- 4 files changed, 147 insertions(+), 187 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2efcf38f3b73..8ce4e9e47959 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -36,6 +36,7 @@ #include #include #include +#include "internal.h" #include @@ -100,7 +101,6 @@ struct mem_cgroup_per_zone { /* * spin_lock to protect the per cgroup LRU */ - spinlock_t lru_lock; struct list_head lists[NR_LRU_LISTS]; unsigned long count[NR_LRU_LISTS]; }; @@ -163,14 +163,12 @@ enum charge_type { /* only for here (for easy reading.)
*/ #define PCGF_CACHE (1UL << PCG_CACHE) #define PCGF_USED (1UL << PCG_USED) -#define PCGF_ACTIVE (1UL << PCG_ACTIVE) #define PCGF_LOCK (1UL << PCG_LOCK) -#define PCGF_FILE (1UL << PCG_FILE) static const unsigned long pcg_default_flags[NR_CHARGE_TYPE] = { - PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */ - PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */ - PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */ + PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */ + PCGF_USED | PCGF_LOCK, /* Anon */ + PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */ 0, /* FORCE */ }; @@ -185,9 +183,6 @@ pcg_default_flags[NR_CHARGE_TYPE] = { static void mem_cgroup_get(struct mem_cgroup *mem); static void mem_cgroup_put(struct mem_cgroup *mem); -/* - * Always modified under lru lock. Then, not necessary to preempt_disable() - */ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, struct page_cgroup *pc, bool charge) @@ -195,10 +190,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int val = (charge)? 1 : -1; struct mem_cgroup_stat *stat = &mem->stat; struct mem_cgroup_stat_cpu *cpustat; + int cpu = get_cpu(); - VM_BUG_ON(!irqs_disabled()); - - cpustat = &stat->cpustat[smp_processor_id()]; + cpustat = &stat->cpustat[cpu]; if (PageCgroupCache(pc)) __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); else @@ -210,6 +204,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, else __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); + put_cpu(); } static struct mem_cgroup_per_zone * @@ -264,80 +259,95 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) struct mem_cgroup, css); } -static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, - struct page_cgroup *pc) -{ - int lru = LRU_BASE; +/* + * Following LRU functions are allowed to be used without PCG_LOCK. + * Operations are called by routine of global LRU independently from memcg. + * What we have to take care of here is validness of pc->mem_cgroup. + * + * Changes to pc->mem_cgroup happens when + * 1. charge + * 2. moving account + * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. + * It is added to LRU before charge. + * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. + * When moving account, the page is not on LRU. It's isolated. + */ - if (PageCgroupUnevictable(pc)) - lru = LRU_UNEVICTABLE; - else { - if (PageCgroupActive(pc)) - lru += LRU_ACTIVE; - if (PageCgroupFile(pc)) - lru += LRU_FILE; - } +void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) +{ + struct page_cgroup *pc; + struct mem_cgroup *mem; + struct mem_cgroup_per_zone *mz; + if (mem_cgroup_subsys.disabled) + return; + pc = lookup_page_cgroup(page); + /* can happen while we handle swapcache. 
*/ + if (list_empty(&pc->lru)) + return; + mz = page_cgroup_zoneinfo(pc); + mem = pc->mem_cgroup; MEM_CGROUP_ZSTAT(mz, lru) -= 1; - - mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false); - list_del(&pc->lru); + list_del_init(&pc->lru); + return; } -static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, - struct page_cgroup *pc, bool hot) +void mem_cgroup_del_lru(struct page *page) { - int lru = LRU_BASE; + mem_cgroup_del_lru_list(page, page_lru(page)); +} - if (PageCgroupUnevictable(pc)) - lru = LRU_UNEVICTABLE; - else { - if (PageCgroupActive(pc)) - lru += LRU_ACTIVE; - if (PageCgroupFile(pc)) - lru += LRU_FILE; - } +void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) +{ + struct mem_cgroup_per_zone *mz; + struct page_cgroup *pc; - MEM_CGROUP_ZSTAT(mz, lru) += 1; - if (hot) - list_add(&pc->lru, &mz->lists[lru]); - else - list_add_tail(&pc->lru, &mz->lists[lru]); + if (mem_cgroup_subsys.disabled) + return; - mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true); + pc = lookup_page_cgroup(page); + smp_rmb(); + /* unused page is not rotated. */ + if (!PageCgroupUsed(pc)) + return; + mz = page_cgroup_zoneinfo(pc); + list_move(&pc->lru, &mz->lists[lru]); } -static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru) +void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) { - struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); - int active = PageCgroupActive(pc); - int file = PageCgroupFile(pc); - int unevictable = PageCgroupUnevictable(pc); - enum lru_list from = unevictable ? LRU_UNEVICTABLE : - (LRU_FILE * !!file + !!active); + struct page_cgroup *pc; + struct mem_cgroup_per_zone *mz; - if (lru == from) + if (mem_cgroup_subsys.disabled) + return; + pc = lookup_page_cgroup(page); + /* barrier to sync with "charge" */ + smp_rmb(); + if (!PageCgroupUsed(pc)) return; - MEM_CGROUP_ZSTAT(mz, from) -= 1; - /* - * However this is done under mz->lru_lock, another flags, which - * are not related to LRU, will be modified from out-of-lock. - * We have to use atomic set/clear flags. - */ - if (is_unevictable_lru(lru)) { - ClearPageCgroupActive(pc); - SetPageCgroupUnevictable(pc); - } else { - if (is_active_lru(lru)) - SetPageCgroupActive(pc); - else - ClearPageCgroupActive(pc); - ClearPageCgroupUnevictable(pc); - } - + mz = page_cgroup_zoneinfo(pc); MEM_CGROUP_ZSTAT(mz, lru) += 1; - list_move(&pc->lru, &mz->lists[lru]); + list_add(&pc->lru, &mz->lists[lru]); +} +/* + * To add swapcache into LRU. Be careful to all this function. + * zone->lru_lock shouldn't be held and irq must not be disabled. 
+ */ +static void mem_cgroup_lru_fixup(struct page *page) +{ + if (!isolate_lru_page(page)) + putback_lru_page(page); +} + +void mem_cgroup_move_lists(struct page *page, + enum lru_list from, enum lru_list to) +{ + if (mem_cgroup_subsys.disabled) + return; + mem_cgroup_del_lru_list(page, from); + mem_cgroup_add_lru_list(page, to); } int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) @@ -350,37 +360,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) return ret; } -/* - * This routine assumes that the appropriate zone's lru lock is already held - */ -void mem_cgroup_move_lists(struct page *page, enum lru_list lru) -{ - struct page_cgroup *pc; - struct mem_cgroup_per_zone *mz; - unsigned long flags; - - if (mem_cgroup_subsys.disabled) - return; - - /* - * We cannot lock_page_cgroup while holding zone's lru_lock, - * because other holders of lock_page_cgroup can be interrupted - * with an attempt to rotate_reclaimable_page. But we cannot - * safely get to page_cgroup without it, so just try_lock it: - * mem_cgroup_isolate_pages allows for page left on wrong list. - */ - pc = lookup_page_cgroup(page); - if (!trylock_page_cgroup(pc)) - return; - if (pc && PageCgroupUsed(pc)) { - mz = page_cgroup_zoneinfo(pc); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_move_lists(pc, lru); - spin_unlock_irqrestore(&mz->lru_lock, flags); - } - unlock_page_cgroup(pc); -} - /* * Calculate mapped_ratio under memory controller. This will be used in * vmscan.c for deteremining we have to reclaim mapped pages. @@ -460,40 +439,24 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); src = &mz->lists[lru]; - spin_lock(&mz->lru_lock); scan = 0; list_for_each_entry_safe_reverse(pc, tmp, src, lru) { if (scan >= nr_to_scan) break; + + page = pc->page; if (unlikely(!PageCgroupUsed(pc))) continue; - page = pc->page; - if (unlikely(!PageLRU(page))) continue; - /* - * TODO: play better with lumpy reclaim, grabbing anything. - */ - if (PageUnevictable(page) || - (PageActive(page) && !active) || - (!PageActive(page) && active)) { - __mem_cgroup_move_lists(pc, page_lru(page)); - continue; - } - scan++; - list_move(&pc->lru, &pc_list); - if (__isolate_lru_page(page, mode, file) == 0) { list_move(&page->lru, dst); nr_taken++; } } - list_splice(&pc_list, src); - spin_unlock(&mz->lru_lock); - *scanned = scan; return nr_taken; } @@ -608,9 +571,6 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, struct page_cgroup *pc, enum charge_type ctype) { - struct mem_cgroup_per_zone *mz; - unsigned long flags; - /* try_charge() can return NULL to *memcg, taking care of it. */ if (!mem) return; @@ -625,17 +585,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, return; } pc->mem_cgroup = mem; - /* - * If a page is accounted as a page cache, insert to inactive list. - * If anon, insert to active list. - */ + smp_wmb(); pc->flags = pcg_default_flags[ctype]; - mz = page_cgroup_zoneinfo(pc); + mem_cgroup_charge_statistics(mem, pc, true); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_add_list(mz, pc, true); - spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); } @@ -646,8 +600,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, * @to: mem_cgroup which the page is moved to. @from != @to. * * The caller must confirm following. - * 1. disable irq. - * 2. lru_lock of old mem_cgroup(@from) should be held. 
+ * - page is not on LRU (isolate_page() is useful.) * * returns 0 at success, * returns -EBUSY when lock is busy or "pc" is unstable. @@ -663,15 +616,14 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, int nid, zid; int ret = -EBUSY; - VM_BUG_ON(!irqs_disabled()); VM_BUG_ON(from == to); + VM_BUG_ON(PageLRU(pc->page)); nid = page_cgroup_nid(pc); zid = page_cgroup_zid(pc); from_mz = mem_cgroup_zoneinfo(from, nid, zid); to_mz = mem_cgroup_zoneinfo(to, nid, zid); - if (!trylock_page_cgroup(pc)) return ret; @@ -681,18 +633,15 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, if (pc->mem_cgroup != from) goto out; - if (spin_trylock(&to_mz->lru_lock)) { - __mem_cgroup_remove_list(from_mz, pc); - css_put(&from->css); - res_counter_uncharge(&from->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&from->memsw, PAGE_SIZE); - pc->mem_cgroup = to; - css_get(&to->css); - __mem_cgroup_add_list(to_mz, pc, false); - ret = 0; - spin_unlock(&to_mz->lru_lock); - } + css_put(&from->css); + res_counter_uncharge(&from->res, PAGE_SIZE); + mem_cgroup_charge_statistics(from, pc, false); + if (do_swap_account) + res_counter_uncharge(&from->memsw, PAGE_SIZE); + pc->mem_cgroup = to; + mem_cgroup_charge_statistics(to, pc, true); + css_get(&to->css); + ret = 0; out: unlock_page_cgroup(pc); return ret; @@ -706,39 +655,47 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, struct mem_cgroup *child, gfp_t gfp_mask) { + struct page *page = pc->page; struct cgroup *cg = child->css.cgroup; struct cgroup *pcg = cg->parent; struct mem_cgroup *parent; - struct mem_cgroup_per_zone *mz; - unsigned long flags; int ret; /* Is ROOT ? */ if (!pcg) return -EINVAL; + parent = mem_cgroup_from_cont(pcg); + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); if (ret) return ret; - mz = mem_cgroup_zoneinfo(child, - page_cgroup_nid(pc), page_cgroup_zid(pc)); + if (!get_page_unless_zero(page)) + return -EBUSY; + + ret = isolate_lru_page(page); + + if (ret) + goto cancel; - spin_lock_irqsave(&mz->lru_lock, flags); ret = mem_cgroup_move_account(pc, child, parent); - spin_unlock_irqrestore(&mz->lru_lock, flags); - /* drop extra refcnt */ + /* drop extra refcnt by try_charge() (move_account increment one) */ css_put(&parent->css); - /* uncharge if move fails */ - if (ret) { - res_counter_uncharge(&parent->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&parent->memsw, PAGE_SIZE); + putback_lru_page(page); + if (!ret) { + put_page(page); + return 0; } - + /* uncharge if move fails */ +cancel: + res_counter_uncharge(&parent->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&parent->memsw, PAGE_SIZE); + put_page(page); return ret; } @@ -912,6 +869,8 @@ int mem_cgroup_cache_charge_swapin(struct page *page, } if (!locked) unlock_page(page); + /* add this page(page_cgroup) to the LRU we want. */ + mem_cgroup_lru_fixup(page); return ret; } @@ -944,6 +903,8 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) } } + /* add this page(page_cgroup) to the LRU we want. 
*/ + mem_cgroup_lru_fixup(page); } void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) @@ -968,7 +929,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) struct page_cgroup *pc; struct mem_cgroup *mem = NULL; struct mem_cgroup_per_zone *mz; - unsigned long flags; if (mem_cgroup_subsys.disabled) return NULL; @@ -1010,12 +970,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) res_counter_uncharge(&mem->memsw, PAGE_SIZE); + mem_cgroup_charge_statistics(mem, pc, false); ClearPageCgroupUsed(pc); mz = page_cgroup_zoneinfo(pc); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_remove_list(mz, pc); - spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); css_put(&mem->css); @@ -1281,21 +1239,22 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, return ret; } - /* * This routine traverse page_cgroup in given list and drop them all. * *And* this routine doesn't reclaim page itself, just removes page_cgroup. */ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, - struct mem_cgroup_per_zone *mz, - enum lru_list lru) + int node, int zid, enum lru_list lru) { + struct zone *zone; + struct mem_cgroup_per_zone *mz; struct page_cgroup *pc, *busy; - unsigned long flags; - unsigned long loop; + unsigned long flags, loop; struct list_head *list; int ret = 0; + zone = &NODE_DATA(node)->node_zones[zid]; + mz = mem_cgroup_zoneinfo(mem, node, zid); list = &mz->lists[lru]; loop = MEM_CGROUP_ZSTAT(mz, lru); @@ -1304,19 +1263,19 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, busy = NULL; while (loop--) { ret = 0; - spin_lock_irqsave(&mz->lru_lock, flags); + spin_lock_irqsave(&zone->lru_lock, flags); if (list_empty(list)) { - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); break; } pc = list_entry(list->prev, struct page_cgroup, lru); if (busy == pc) { list_move(&pc->lru, list); busy = 0; - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); continue; } - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE); if (ret == -ENOMEM) @@ -1329,6 +1288,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, } else busy = NULL; } + if (!ret && !list_empty(list)) return -EBUSY; return ret; @@ -1364,12 +1324,10 @@ move_account: ret = 0; for_each_node_state(node, N_POSSIBLE) { for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { - struct mem_cgroup_per_zone *mz; enum lru_list l; - mz = mem_cgroup_zoneinfo(mem, node, zid); for_each_lru(l) { ret = mem_cgroup_force_empty_list(mem, - mz, l); + node, zid, l); if (ret) break; } @@ -1413,6 +1371,7 @@ try_to_free: } } + lru_add_drain(); /* try move_account...there may be some *locked* pages. 
*/ if (mem->res.usage) goto move_account; @@ -1657,7 +1616,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; - spin_lock_init(&mz->lru_lock); for_each_lru(l) INIT_LIST_HEAD(&mz->lists[l]); } @@ -1706,8 +1664,15 @@ static struct mem_cgroup *mem_cgroup_alloc(void) static void mem_cgroup_free(struct mem_cgroup *mem) { + int node; + if (atomic_read(&mem->refcnt) > 0) return; + + + for_each_node_state(node, N_POSSIBLE) + free_mem_cgroup_per_zone_info(mem, node); + if (mem_cgroup_size() < PAGE_SIZE) kfree(mem); else @@ -1780,12 +1745,6 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - int node; - struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - - for_each_node_state(node, N_POSSIBLE) - free_mem_cgroup_per_zone_info(mem, node); - mem_cgroup_free(mem_cgroup_from_cont(cont)); } diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 685e7c8e1fd6..74ae8e01d071 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -16,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) pc->flags = 0; pc->mem_cgroup = NULL; pc->page = pfn_to_page(pfn); + INIT_LIST_HEAD(&pc->lru); } static unsigned long total_usage; diff --git a/mm/swap.c b/mm/swap.c index ba2c0e8b8b54..8a98a9c90704 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -168,7 +168,6 @@ void activate_page(struct page *page) lru += LRU_ACTIVE; add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - mem_cgroup_move_lists(page, lru); zone->recent_rotated[!!file]++; zone->recent_scanned[!!file]++; diff --git a/mm/vmscan.c b/mm/vmscan.c index f63b20dd7714..45983af1de3d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -512,7 +512,6 @@ redo: lru = LRU_UNEVICTABLE; add_page_to_unevictable_list(page); } - mem_cgroup_move_lists(page, lru); /* * page's status can change while we move it among lru. 
If an evictable @@ -547,7 +546,6 @@ void putback_lru_page(struct page *page) lru = !!TestClearPageActive(page) + page_is_file_cache(page); lru_cache_add_lru(page, lru); - mem_cgroup_move_lists(page, lru); put_page(page); } #endif /* CONFIG_UNEVICTABLE_LRU */ @@ -813,6 +811,7 @@ int __isolate_lru_page(struct page *page, int mode, int file) return ret; ret = -EBUSY; + if (likely(get_page_unless_zero(page))) { /* * Be careful not to clear PageLRU until after we're * sure it is really on the */ ClearPageLRU(page); ret = 0; + mem_cgroup_del_lru(page); } return ret; @@ -1134,7 +1134,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(zone, page, lru); - mem_cgroup_move_lists(page, lru); if (PageActive(page) && scan_global_lru(sc)) { int file = !!page_is_file_cache(page); zone->recent_rotated[file]++; @@ -1263,7 +1262,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, ClearPageActive(page); list_move(&page->lru, &zone->lru[lru].list); - mem_cgroup_move_lists(page, lru); + mem_cgroup_add_lru_list(page, lru); pgmoved++; if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); @@ -2408,6 +2407,7 @@ retry: __dec_zone_state(zone, NR_UNEVICTABLE); list_move(&page->lru, &zone->lru[l].list); + mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); __inc_zone_state(zone, NR_INACTIVE_ANON + l); __count_vm_event(UNEVICTABLE_PGRESCUED); } else { /* */ SetPageUnevictable(page); list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); + mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); if (page_evictable(page, NULL)) goto retry; } -- cgit v1.2.2 From f8d665422603ee1b8ed04dcad4242f14d623c941 Mon Sep 17 00:00:00 2001 From: Hirokazu Takahashi Date: Wed, 7 Jan 2009 18:08:02 -0800 Subject: memcg: add mem_cgroup_disabled() We check whether mem_cgroup is disabled by checking mem_cgroup_subsys.disabled, and it has more references than expected now. Replacing if (mem_cgroup_subsys.disabled) with if (mem_cgroup_disabled()) gives us a cleaner look, I think. [kamezawa.hiroyu@jp.fujitsu.com: fix typo] Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 28 ++++++++++++++-------------- mm/page_cgroup.c | 4 ++-- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8ce4e9e47959..9846f617115d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -279,7 +279,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) struct mem_cgroup *mem; struct mem_cgroup_per_zone *mz; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; pc = lookup_page_cgroup(page); /* can happen while we handle swapcache.
*/ @@ -302,7 +302,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) struct mem_cgroup_per_zone *mz; struct page_cgroup *pc; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; pc = lookup_page_cgroup(page); @@ -319,7 +319,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) struct page_cgroup *pc; struct mem_cgroup_per_zone *mz; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; pc = lookup_page_cgroup(page); /* barrier to sync with "charge" */ @@ -344,7 +344,7 @@ static void mem_cgroup_lru_fixup(struct page *page) void mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; mem_cgroup_del_lru_list(page, from); mem_cgroup_add_lru_list(page, to); @@ -731,7 +731,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; if (PageCompound(page)) return 0; @@ -753,7 +753,7 @@ int mem_cgroup_newpage_charge(struct page *page, int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; if (PageCompound(page)) return 0; @@ -799,7 +799,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct mem_cgroup *mem; swp_entry_t ent; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; if (!do_swap_account) @@ -833,7 +833,7 @@ int mem_cgroup_cache_charge_swapin(struct page *page, { int ret = 0; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; if (unlikely(!mm)) mm = &init_mm; @@ -880,7 +880,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) { struct page_cgroup *pc; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; if (!ptr) return; @@ -909,7 +909,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) { - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; if (!mem) return; @@ -930,7 +930,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) struct mem_cgroup *mem = NULL; struct mem_cgroup_per_zone *mz; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return NULL; if (PageSwapCache(page)) @@ -1049,7 +1049,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) struct mem_cgroup *mem = NULL; int ret = 0; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; pc = lookup_page_cgroup(page); @@ -1131,7 +1131,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) int progress = 0; int retry = MEM_CGROUP_RECLAIM_RETRIES; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return 0; if (!mm) return 0; @@ -1697,7 +1697,7 @@ static void mem_cgroup_put(struct mem_cgroup *mem) #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP static void __init enable_swap_cgroup(void) { - if (!mem_cgroup_subsys.disabled && really_do_swap_account) + if (!mem_cgroup_disabled() && really_do_swap_account) do_swap_account = 1; } #else diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 74ae8e01d071..7006a11350c8 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -74,7 +74,7 @@ void __init page_cgroup_init(void) int nid, fail; - if (mem_cgroup_subsys.disabled) + if 
(mem_cgroup_disabled()) return; for_each_online_node(nid) { @@ -247,7 +247,7 @@ void __init page_cgroup_init(void) unsigned long pfn; int fail = 0; - if (mem_cgroup_subsys.disabled) + if (mem_cgroup_disabled()) return; for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { -- cgit v1.2.2 From 28dbc4b6a01fb579a9441c7b81e3d3413dc452df Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 7 Jan 2009 18:08:05 -0800 Subject: memcg: memory cgroup resource counters for hierarchy Add support for building hierarchies in resource counters. Cgroups allow us to build a deep hierarchy, but we currently don't link the resource counters belonging to the memory controller control groups, in the same fashion as the corresponding cgroup entries in the cgroup hierarchy. This patch provides the infrastructure for resource counters that have the same hierarchy as their cgroup counterparts. This set of patches is based on the resource counter hierarchy patches posted by Pavel Emelianov. NOTE: Building hierarchies is expensive; deeper hierarchies imply charging all the way up to the root. Since hierarchies are known to be expensive, the user needs to be careful and aware of the trade-offs before creating very deep ones. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Balbir Singh Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: Li Zefan Cc: David Rientjes Cc: Pavel Emelianov Cc: Dhaval Giani Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9846f617115d..e72fb2b4a7d8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -471,6 +471,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, { struct mem_cgroup *mem; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; + struct res_counter *fail_res; /* * We always charge the cgroup the mm_struct belongs to. * The mm_struct's mem_cgroup changes on task migration if the @@ -499,11 +500,12 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, int ret; bool noswap = false; - ret = res_counter_charge(&mem->res, PAGE_SIZE); + ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res); if (likely(!ret)) { if (!do_swap_account) break; - ret = res_counter_charge(&mem->memsw, PAGE_SIZE); + ret = res_counter_charge(&mem->memsw, PAGE_SIZE, + &fail_res); if (likely(!ret)) break; /* mem+swap counter fails */ @@ -1709,22 +1711,26 @@ static void __init enable_swap_cgroup(void) static struct cgroup_subsys_state * mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) { - struct mem_cgroup *mem; + struct mem_cgroup *mem, *parent; int node; mem = mem_cgroup_alloc(); if (!mem) return ERR_PTR(-ENOMEM); - res_counter_init(&mem->res); - res_counter_init(&mem->memsw); - for_each_node_state(node, N_POSSIBLE) if (alloc_mem_cgroup_per_zone_info(mem, node)) goto free_out; /* root ? */ - if (cont->parent == NULL) + if (cont->parent == NULL) { enable_swap_cgroup(); + parent = NULL; + } else + parent = mem_cgroup_from_cont(cont->parent); + + res_counter_init(&mem->res, parent ? &parent->res : NULL); + res_counter_init(&mem->memsw, parent ? &parent->memsw : NULL); + return &mem->css; free_out: -- cgit v1.2.2 From 6d61ef409d6ba168972f7c2f8c35baaade636a58 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 7 Jan 2009 18:08:06 -0800 Subject: memcg: memory cgroup hierarchical reclaim This patch introduces hierarchical reclaim.
When an ancestor goes over its limit, the charging routine points to the parent that is above its limit. The reclaim process then starts from the last scanned child of the ancestor and reclaims until the ancestor goes below its limit. [akpm@linux-foundation.org: coding-style fixes] [d-nishimura@mtf.biglobe.ne.jp: mem_cgroup_from_res_counter should handle both mem->res and mem->memsw] Signed-off-by: Balbir Singh Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: Li Zefan Cc: David Rientjes Cc: Pavel Emelianov Cc: Dhaval Giani Cc: KAMEZAWA Hiroyuki Signed-off-by: Daisuke Nishimura Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 162 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e72fb2b4a7d8..20e1d90b3363 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -143,6 +143,13 @@ struct mem_cgroup { struct mem_cgroup_lru_info info; int prev_priority; /* for recording reclaim priority */ + + /* + * While reclaiming in a hiearchy, we cache the last child we + * reclaimed from. Protected by cgroup_lock() + */ + struct mem_cgroup *last_scanned_child; + int obsolete; atomic_t refcnt; /* @@ -461,6 +468,149 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, return nr_taken; } +#define mem_cgroup_from_res_counter(counter, member) \ + container_of(counter, struct mem_cgroup, member) + +/* + * This routine finds the DFS walk successor. This routine should be + * called with cgroup_mutex held + */ +static struct mem_cgroup * +mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem) +{ + struct cgroup *cgroup, *curr_cgroup, *root_cgroup; + + curr_cgroup = curr->css.cgroup; + root_cgroup = root_mem->css.cgroup; + + if (!list_empty(&curr_cgroup->children)) { + /* + * Walk down to children + */ + mem_cgroup_put(curr); + cgroup = list_entry(curr_cgroup->children.next, + struct cgroup, sibling); + curr = mem_cgroup_from_cont(cgroup); + mem_cgroup_get(curr); + goto done; + } + +visit_parent: + if (curr_cgroup == root_cgroup) { + mem_cgroup_put(curr); + curr = root_mem; + mem_cgroup_get(curr); + goto done; + } + + /* + * Goto next sibling + */ + if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) { + mem_cgroup_put(curr); + cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup, + sibling); + curr = mem_cgroup_from_cont(cgroup); + mem_cgroup_get(curr); + goto done; + } + + /* + * Go up to next parent and next parent's sibling if need be + */ + curr_cgroup = curr_cgroup->parent; + goto visit_parent; + +done: + root_mem->last_scanned_child = curr; + return curr; +} + +/* + * Visit the first child (need not be the first child as per the ordering + * of the cgroup list, since we track last_scanned_child) of @mem and use + * that to reclaim free pages from. 
+ */ +static struct mem_cgroup * +mem_cgroup_get_first_node(struct mem_cgroup *root_mem) +{ + struct cgroup *cgroup; + struct mem_cgroup *ret; + bool obsolete = (root_mem->last_scanned_child && + root_mem->last_scanned_child->obsolete); + + /* + * Scan all children under the mem_cgroup mem + */ + cgroup_lock(); + if (list_empty(&root_mem->css.cgroup->children)) { + ret = root_mem; + goto done; + } + + if (!root_mem->last_scanned_child || obsolete) { + + if (obsolete) + mem_cgroup_put(root_mem->last_scanned_child); + + cgroup = list_first_entry(&root_mem->css.cgroup->children, + struct cgroup, sibling); + ret = mem_cgroup_from_cont(cgroup); + mem_cgroup_get(ret); + } else + ret = mem_cgroup_get_next_node(root_mem->last_scanned_child, + root_mem); + +done: + root_mem->last_scanned_child = ret; + cgroup_unlock(); + return ret; +} + +/* + * Dance down the hierarchy if needed to reclaim memory. We remember the + * last child we reclaimed from, so that we don't end up penalizing + * one child extensively based on its position in the children list. + * + * root_mem is the original ancestor that we've been reclaim from. + */ +static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, + gfp_t gfp_mask, bool noswap) +{ + struct mem_cgroup *next_mem; + int ret = 0; + + /* + * Reclaim unconditionally and don't check for return value. + * We need to reclaim in the current group and down the tree. + * One might think about checking for children before reclaiming, + * but there might be left over accounting, even after children + * have left. + */ + ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap); + if (res_counter_check_under_limit(&root_mem->res)) + return 0; + + next_mem = mem_cgroup_get_first_node(root_mem); + + while (next_mem != root_mem) { + if (next_mem->obsolete) { + mem_cgroup_put(next_mem); + cgroup_lock(); + next_mem = mem_cgroup_get_first_node(root_mem); + cgroup_unlock(); + continue; + } + ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap); + if (res_counter_check_under_limit(&root_mem->res)) + return 0; + cgroup_lock(); + next_mem = mem_cgroup_get_next_node(next_mem, root_mem); + cgroup_unlock(); + } + return ret; +} + /* * Unlike exported interface, "oom" parameter is added. if oom==true, * oom-killer can be invoked. @@ -469,7 +619,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) { - struct mem_cgroup *mem; + struct mem_cgroup *mem, *mem_over_limit; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct res_counter *fail_res; /* @@ -511,12 +661,18 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, /* mem+swap counter fails */ res_counter_uncharge(&mem->res, PAGE_SIZE); noswap = true; - } + mem_over_limit = mem_cgroup_from_res_counter(fail_res, + memsw); + } else + /* mem counter fails */ + mem_over_limit = mem_cgroup_from_res_counter(fail_res, + res); + if (!(gfp_mask & __GFP_WAIT)) goto nomem; - if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap)) - continue; + ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask, + noswap); /* * try_to_free_mem_cgroup_pages() might not give us a full @@ -1732,6 +1888,8 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) res_counter_init(&mem->memsw, parent ? 
&parent->memsw : NULL); + mem->last_scanned_child = NULL; + return &mem->css; free_out: for_each_node_state(node, N_POSSIBLE) -- cgit v1.2.2 From 18f59ea7de08db2449ba99185e8d8cc30e7acac5 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 7 Jan 2009 18:08:07 -0800 Subject: memcg: memory cgroup hierarchy feature selector Don't enable multiple hierarchy support by default. This patch introduces a features element that can be set to enable the nested depth hierarchy feature. This feature can only be enabled when the cgroup for which the feature this is enabled, has no children. Signed-off-by: Balbir Singh Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: Li Zefan Cc: David Rientjes Cc: Pavel Emelianov Cc: Dhaval Giani Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 20e1d90b3363..886e2224c5fd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -149,6 +149,10 @@ struct mem_cgroup { * reclaimed from. Protected by cgroup_lock() */ struct mem_cgroup *last_scanned_child; + /* + * Should the accounting and control be hierarchical, per subtree? + */ + bool use_hierarchy; int obsolete; atomic_t refcnt; @@ -1543,6 +1547,44 @@ int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) } +static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) +{ + return mem_cgroup_from_cont(cont)->use_hierarchy; +} + +static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, + u64 val) +{ + int retval = 0; + struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + struct cgroup *parent = cont->parent; + struct mem_cgroup *parent_mem = NULL; + + if (parent) + parent_mem = mem_cgroup_from_cont(parent); + + cgroup_lock(); + /* + * If parent's use_hiearchy is set, we can't make any modifications + * in the child subtrees. If it is unset, then the change can + * occur, provided the current cgroup has no children. + * + * For the root cgroup, parent_mem is NULL, we allow value to be + * set if there are no children. + */ + if ((!parent_mem || !parent_mem->use_hierarchy) && + (val == 1 || val == 0)) { + if (list_empty(&cont->children)) + mem->use_hierarchy = val; + else + retval = -EBUSY; + } else + retval = -EINVAL; + cgroup_unlock(); + + return retval; +} + static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); @@ -1706,6 +1748,11 @@ static struct cftype mem_cgroup_files[] = { .name = "force_empty", .trigger = mem_cgroup_force_empty_write, }, + { + .name = "use_hierarchy", + .write_u64 = mem_cgroup_hierarchy_write, + .read_u64 = mem_cgroup_hierarchy_read, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -1881,12 +1928,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (cont->parent == NULL) { enable_swap_cgroup(); parent = NULL; - } else + } else { parent = mem_cgroup_from_cont(cont->parent); + mem->use_hierarchy = parent->use_hierarchy; + } - res_counter_init(&mem->res, parent ? &parent->res : NULL); - res_counter_init(&mem->memsw, parent ? 
&parent->memsw : NULL); - + if (parent && parent->use_hierarchy) { + res_counter_init(&mem->res, &parent->res); + res_counter_init(&mem->memsw, &parent->memsw); + } else { + res_counter_init(&mem->res, NULL); + res_counter_init(&mem->memsw, NULL); + } mem->last_scanned_child = NULL; -- cgit v1.2.2 From a636b327f731143ccc544b966cfd8de6cb6d72c6 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:08 -0800 Subject: memcg: avoid unnecessary system-wide-oom-killer Current mmotm has a new OOM function, pagefault_out_of_memory(). It was added to select a bad process rather than killing current. When a memcg hits its limit and invokes OOM at page fault time, this handler is called and system-wide OOM handling happens (meaning the kernel panics if panic_on_oom is true). To avoid such overkill, check the memcg's recent behavior before starting system-wide OOM. This patch also guarantees that we don't account against a process with TIF_MEMDIE set. This is necessary for smooth OOM. [akpm@linux-foundation.org: build fix] Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Badari Pulavarty Cc: Jan Blunck Cc: Hirokazu Takahashi Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 33 +++++++++++++++++++++++++++++---- mm/oom_kill.c | 8 ++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 886e2224c5fd..659b0c58f13e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -153,7 +153,7 @@ struct mem_cgroup { * Should the accounting and control be hierarchical, per subtree? */ bool use_hierarchy; - + unsigned long last_oom_jiffies; int obsolete; atomic_t refcnt; /* @@ -615,6 +615,22 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, return ret; } +bool mem_cgroup_oom_called(struct task_struct *task) +{ + bool ret = false; + struct mem_cgroup *mem; + struct mm_struct *mm; + + rcu_read_lock(); + mm = task->mm; + if (!mm) + mm = &init_mm; + mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10)) + ret = true; + rcu_read_unlock(); + return ret; +} /* * Unlike exported interface, "oom" parameter is added. if oom==true, * oom-killer can be invoked. @@ -626,6 +642,13 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, struct mem_cgroup *mem, *mem_over_limit; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct res_counter *fail_res; + + if (unlikely(test_thread_flag(TIF_MEMDIE))) { + /* Don't account this! */ + *memcg = NULL; + return 0; + } + + /* * We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the @@ -694,8 +717,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, continue; if (!nr_retries--) { - if (oom) + if (oom) { mem_cgroup_out_of_memory(mem, gfp_mask); + mem->last_oom_jiffies = jiffies; + } goto nomem; } } @@ -832,7 +857,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); - if (ret) + if (ret || !parent) return ret; if (!get_page_unless_zero(page)) @@ -883,7 +908,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, mem = memcg; ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true); - if (ret) + if (ret || !mem) return ret; __mem_cgroup_commit_charge(mem, pc, ctype); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6b9e758c98a5..fd150e3a2567 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -560,6 +560,13 @@ void pagefault_out_of_memory(void) /* Got some memory back in the last second. */ return; + /* + * If this is from memcg, oom-killer is already invoked. + * and not worth to go system-wide-oom. + */ + if (mem_cgroup_oom_called(current)) + goto rest_and_return; + if (sysctl_panic_on_oom) panic("out of memory from page fault. panic_on_oom is selected.\n"); @@ -571,6 +578,7 @@ void pagefault_out_of_memory(void) * Give "p" a good chance of killing itself before we * retry to allocate memory. */ +rest_and_return: if (!test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); } -- cgit v1.2.2 From 887007561ae58628f03aa9046949747c04f63be8 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:09 -0800 Subject: memcg: fix reclaim result checks check_under_limit logic was wrong and this check should be against mem_over_limit rather than mem. Reported-by: Li Zefan Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Badari Pulavarty Cc: Jan Blunck Cc: Hirokazu Takahashi Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 659b0c58f13e..9bf5d7c8ede7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -709,17 +709,17 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, * current usage of the cgroup before giving up * */ - if (!do_swap_account && - res_counter_check_under_limit(&mem->res)) - continue; - if (do_swap_account && - res_counter_check_under_limit(&mem->memsw)) - continue; + if (do_swap_account) { + if (res_counter_check_under_limit(&mem_over_limit->res) && + res_counter_check_under_limit(&mem_over_limit->memsw)) + continue; + } else if (res_counter_check_under_limit(&mem_over_limit->res)) + continue; if (!nr_retries--) { if (oom) { - mem_cgroup_out_of_memory(mem, gfp_mask); - mem->last_oom_jiffies = jiffies; + mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); + mem_over_limit->last_oom_jiffies = jiffies; } goto nomem; } -- cgit v1.2.2 From 2c26fdd70c3094fa3e84caf9ef434911933d5477 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:10 -0800 Subject: memcg: revert gfp mask fix My patch, memcg-fix-gfp_mask-of-callers-of-charge.patch changed gfp_mask of callers of charge to be GFP_HIGHUSER_MOVABLE for showing what will happen at memory reclaim. But in recent discussion, it's NACKed because it sounds ugly. This patch is for reverting it and add some clean up to gfp_mask of callers of charge. 
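To make the cleanup concrete, here is a sketch of what masking a caller's flags does before charging; the exact contents of GFP_RECLAIM_MASK are assumed from mm internals of this era, so treat the flag lists as illustrative rather than authoritative:

	/*
	 * Illustrative sketch: a pagecache caller passes placement hints
	 * (__GFP_HIGHMEM, __GFP_MOVABLE) that mean nothing to the charge
	 * path; masking keeps only reclaim-behavior bits such as
	 * __GFP_WAIT, __GFP_IO and __GFP_FS.
	 */
	gfp_t gfp = GFP_HIGHUSER_MOVABLE;
	gfp_t charge_gfp = gfp & GFP_RECLAIM_MASK;	/* what the charge code acts on */
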
No behavior change but need review before generating HUNK in deep queue. This patch also adds explanation to meaning of gfp_mask passed to charge functions in memcontrol.h. Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 2 +- mm/memcontrol.c | 10 +++++----- mm/memory.c | 10 ++++------ mm/shmem.c | 8 ++++---- mm/swapfile.c | 3 +-- 5 files changed, 15 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 2f55a1e2baf7..ceba0bd03662 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, VM_BUG_ON(!PageLocked(page)); error = mem_cgroup_cache_charge(page, current->mm, - gfp_mask & ~__GFP_HIGHMEM); + gfp_mask & GFP_RECLAIM_MASK); if (error) goto out; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9bf5d7c8ede7..b9cd57b667d6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) unlock_page_cgroup(pc); if (mem) { - ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem); + ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem); css_put(&mem->css); } *ptr = mem; @@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, break; progress = try_to_free_mem_cgroup_pages(memcg, - GFP_HIGHUSER_MOVABLE, false); + GFP_KERNEL, false); if (!progress) retry_count--; } return ret; @@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, break; oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true); + try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true); curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); if (curusage >= oldusage) retry_count--; @@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, } spin_unlock_irqrestore(&zone->lru_lock, flags); - ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE); + ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL); if (ret == -ENOMEM) break; @@ -1550,7 +1550,7 @@ try_to_free: goto out; } progress = try_to_free_mem_cgroup_pages(mem, - GFP_HIGHUSER_MOVABLE, false); + GFP_KERNEL, false); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ diff --git a/mm/memory.c b/mm/memory.c index 1358012ffa73..e5bfbe6b594c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2000,7 +2000,7 @@ gotten: cow_user_page(new_page, old_page, address, vma); __SetPageUptodate(new_page); - if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE)) + if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) goto oom_free_new; /* @@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - if (mem_cgroup_try_charge_swapin(mm, page, - GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) { + if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { ret = VM_FAULT_OOM; unlock_page(page); goto out; @@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, goto oom; __SetPageUptodate(page); - if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE)) + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) goto oom_free_page; entry = mk_pte(page, vma->vm_page_prot); @@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct 
*mm, struct vm_area_struct *vma, ret = VM_FAULT_OOM; goto out; } - if (mem_cgroup_newpage_charge(page, - mm, GFP_HIGHUSER_MOVABLE)) { + if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { ret = VM_FAULT_OOM; page_cache_release(page); goto out; } diff --git a/mm/shmem.c b/mm/shmem.c index adf5c3eedbc9..bbb7b043c986 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -932,8 +932,8 @@ found: * Charge page using GFP_HIGHUSER_MOVABLE while we can wait. * charged back to the user(not to caller) when swap account is used. */ - error = mem_cgroup_cache_charge_swapin(page, - current->mm, GFP_HIGHUSER_MOVABLE, true); + error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL, + true); if (error) goto out; error = radix_tree_preload(GFP_KERNEL); @@ -1275,7 +1275,7 @@ repeat: * charge against this swap cache here. */ if (mem_cgroup_cache_charge_swapin(swappage, - current->mm, gfp, false)) { + current->mm, gfp & GFP_RECLAIM_MASK, false)) { page_cache_release(swappage); error = -ENOMEM; goto failed; @@ -1393,7 +1393,7 @@ repeat: /* Precharge page while we can wait, compensate after */ error = mem_cgroup_cache_charge(filepage, current->mm, - GFP_HIGHUSER_MOVABLE); + GFP_KERNEL); if (error) { page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); diff --git a/mm/swapfile.c b/mm/swapfile.c index 0579d9069b61..da422c47e2ee 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, pte_t *pte; int ret = 1; - if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, - GFP_HIGHUSER_MOVABLE, &ptr)) + if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) ret = -ENOMEM; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); -- cgit v1.2.2 From f9717d28d673468883df8ac34b47268719ac5a3d Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Wed, 7 Jan 2009 18:08:11 -0800 Subject: memcg: check group leader fix Remove unnecessary code (...fragments of not-implemented functionality...) Reported-by: Nikanth Karthikesan Signed-off-by: Nikanth Karthikesan Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b9cd57b667d6..b83790083087 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2008,25 +2008,10 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, struct cgroup *old_cont, struct task_struct *p) { - struct mm_struct *mm; - struct mem_cgroup *mem, *old_mem; - - mm = get_task_mm(p); - if (mm == NULL) - return; - - mem = mem_cgroup_from_cont(cont); - old_mem = mem_cgroup_from_cont(old_cont); - /* - * Only thread group leaders are allowed to migrate, the mm_struct is - * in effect owned by the leader + * FIXME: It's better to move charges of this process from old + * memcg to new memcg. But it's just on TODO-List now. */ - if (!thread_group_leader(p)) - goto out; - -out: - mmput(mm); } struct cgroup_subsys mem_cgroup_subsys = { -- cgit v1.2.2 From b85a96c0b6cb79c67e7b01b66368f2e31579d7c5 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:12 -0800 Subject: memcg: memory swap controller: fix limit check There are scattered calls of res_counter_check_under_limit(), and most of them don't take mem+swap accounting into account. Define mem_cgroup_check_under_limit() and avoid direct use of res_counter_check_limit().
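As an aside, a minimal usage sketch of the new helper in a charge retry loop; the loop shape and retry bound are illustrative, not taken verbatim from this patch:

	/*
	 * Sketch only: with swap accounting enabled, the cgroup counts as
	 * "under limit" only when both the mem and mem+swap counters are
	 * under their limits, so a caller retries reclaim until the
	 * combined condition holds or the retries run out.
	 */
	while (nr_retries--) {
		if (mem_cgroup_check_under_limit(mem))
			break;		/* both counters now under limit */
		try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap);
	}
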
Reported-by: Daisuke Nishimura Signed-off-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b83790083087..6ad309e9825f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -571,6 +571,18 @@ done: return ret; } +static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) +{ + if (do_swap_account) { + if (res_counter_check_under_limit(&mem->res) && + res_counter_check_under_limit(&mem->memsw)) + return true; + } else + if (res_counter_check_under_limit(&mem->res)) + return true; + return false; +} + /* * Dance down the hierarchy if needed to reclaim memory. We remember the * last child we reclaimed from, so that we don't end up penalizing @@ -592,7 +604,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, * have left. */ ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap); - if (res_counter_check_under_limit(&root_mem->res)) + if (mem_cgroup_check_under_limit(root_mem)) return 0; next_mem = mem_cgroup_get_first_node(root_mem); @@ -606,7 +618,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, continue; } ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap); - if (res_counter_check_under_limit(&root_mem->res)) + if (mem_cgroup_check_under_limit(root_mem)) return 0; cgroup_lock(); next_mem = mem_cgroup_get_next_node(next_mem, root_mem); @@ -709,12 +721,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, * current usage of the cgroup before giving up * */ - if (do_swap_account) { - if (res_counter_check_under_limit(&mem_over_limit->res) && - res_counter_check_under_limit(&mem_over_limit->memsw)) - continue; - } else if (res_counter_check_under_limit(&mem_over_limit->res)) - continue; + if (mem_cgroup_check_under_limit(mem_over_limit)) + continue; if (!nr_retries--) { if (oom) { @@ -1334,7 +1342,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) do { progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true); - progress += res_counter_check_under_limit(&mem->res); + progress += mem_cgroup_check_under_limit(mem); } while (!progress && --retry); css_put(&mem->css); -- cgit v1.2.2 From a7fe942e94b2f66aa0f11d37699c0ec8155d3ad1 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:13 -0800 Subject: memcg: swapout refcnt fix css's refcnt is dropped before end of following access. Hold it until end of access. 
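A condensed sketch of the reference handoff this establishes (ordering only; in the real code the two halves live in __mem_cgroup_uncharge_common() and mem_cgroup_uncharge_swapcache()):

	/* The SWAPOUT uncharge deliberately keeps the css reference ... */
	memcg = __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
	if (memcg) {
		/* ... so memcg is still safe to dereference here */
		swap_cgroup_record(ent, memcg);
		mem_cgroup_get(memcg);
		css_put(&memcg->css);	/* drop it only after the access */
	}
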
Reported-by: Li Zefan Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ad309e9825f..964a70035e8f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1171,7 +1171,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) mz = page_cgroup_zoneinfo(pc); unlock_page_cgroup(pc); - css_put(&mem->css); + /* at swapout, this memcg will be accessed to record to swap */ + if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) + css_put(&mem->css); return mem; @@ -1212,6 +1214,8 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) swap_cgroup_record(ent, memcg); mem_cgroup_get(memcg); } + if (memcg) + css_put(&memcg->css); } #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP -- cgit v1.2.2 From 670ec2f170301425fc4fdfa63d40652071fe85f6 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:13 -0800 Subject: memcg: hierarchy avoid unnecessary reclaim If hierarchy is not used, no tree-walk is necessary. Reviewed-by: KOSAKI Motohiro Signed-off-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 964a70035e8f..9b4993ee46b3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -606,6 +606,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap); if (mem_cgroup_check_under_limit(root_mem)) return 0; + if (!root_mem->use_hierarchy) + return ret; next_mem = mem_cgroup_get_first_node(root_mem); -- cgit v1.2.2 From f89eb90e33fd4e4e0cc1a6d20afd63c5a561885a Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:14 -0800 Subject: inactive_anon_is_low: move to vmscan inactive_anon_is_low() is called only from vmscan, so it can move to vmscan.c. This patch doesn't have any functional change. Reviewed-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 45983af1de3d..f75d924cb4f4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1291,6 +1291,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, pagevec_release(&pvec); } +/** + * inactive_anon_is_low - check if anonymous pages need to be deactivated + * @zone: zone to check + * + * Returns true if the zone does not have enough inactive anon pages, + * meaning some active anon pages need to be deactivated.
+ */ +static int inactive_anon_is_low(struct zone *zone) +{ + unsigned long active, inactive; + + active = zone_page_state(zone, NR_ACTIVE_ANON); + inactive = zone_page_state(zone, NR_INACTIVE_ANON); + + if (inactive * zone->inactive_ratio < active) + return 1; + + return 0; +} + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) { -- cgit v1.2.2 From 6e9015716ae9b59e9635d692fddfcfb9582c146c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:15 -0800 Subject: mm: introduce zone_reclaim struct Add zone_reclam_stat struct for later enhancement. A later patch uses this. This patch doesn't any behavior change (yet). Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 8 ++++---- mm/swap.c | 12 ++++++++---- mm/vmscan.c | 47 ++++++++++++++++++++++++++++++----------------- 3 files changed, 42 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7bf22e045318..5675b3073854 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, INIT_LIST_HEAD(&zone->lru[l].list); zone->lru[l].nr_scan = 0; } - zone->recent_rotated[0] = 0; - zone->recent_rotated[1] = 0; - zone->recent_scanned[0] = 0; - zone->recent_scanned[1] = 0; + zone->reclaim_stat.recent_rotated[0] = 0; + zone->reclaim_stat.recent_rotated[1] = 0; + zone->reclaim_stat.recent_scanned[0] = 0; + zone->reclaim_stat.recent_scanned[1] = 0; zap_zone_vm_stats(zone); zone->flags = 0; if (!size) diff --git a/mm/swap.c b/mm/swap.c index 8a98a9c90704..26b07e7bc3d3 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page) void activate_page(struct page *page) { struct zone *zone = page_zone(page); + struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { @@ -169,8 +170,8 @@ void activate_page(struct page *page) add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - zone->recent_rotated[!!file]++; - zone->recent_scanned[!!file]++; + reclaim_stat->recent_rotated[!!file]++; + reclaim_stat->recent_scanned[!!file]++; } spin_unlock_irq(&zone->lru_lock); } @@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { int i; struct zone *zone = NULL; + struct zone_reclaim_stat *reclaim_stat = NULL; + VM_BUG_ON(is_unevictable_lru(lru)); for (i = 0; i < pagevec_count(pvec); i++) { @@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; + reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); } VM_BUG_ON(PageActive(page)); @@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) VM_BUG_ON(PageLRU(page)); SetPageLRU(page); file = is_file_lru(lru); - zone->recent_scanned[file]++; + reclaim_stat->recent_scanned[file]++; if (is_active_lru(lru)) { SetPageActive(page); - zone->recent_rotated[file]++; + reclaim_stat->recent_rotated[file]++; } add_page_to_lru_list(zone, page, lru); } diff --git a/mm/vmscan.c b/mm/vmscan.c index f75d924cb4f4..03ca923c6656 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -130,6 +130,12 @@ static 
DECLARE_RWSEM(shrinker_rwsem); #define scan_global_lru(sc) (1) #endif +static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, + struct scan_control *sc) +{ + return &zone->reclaim_stat; +} + /* * Add a shrinker callback to be called from the vm */ @@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, struct pagevec pvec; unsigned long nr_scanned = 0; unsigned long nr_reclaimed = 0; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); pagevec_init(&pvec, 1); @@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, if (scan_global_lru(sc)) { zone->pages_scanned += nr_scan; - zone->recent_scanned[0] += count[LRU_INACTIVE_ANON]; - zone->recent_scanned[0] += count[LRU_ACTIVE_ANON]; - zone->recent_scanned[1] += count[LRU_INACTIVE_FILE]; - zone->recent_scanned[1] += count[LRU_ACTIVE_FILE]; + reclaim_stat->recent_scanned[0] += + count[LRU_INACTIVE_ANON]; + reclaim_stat->recent_scanned[0] += + count[LRU_ACTIVE_ANON]; + reclaim_stat->recent_scanned[1] += + count[LRU_INACTIVE_FILE]; + reclaim_stat->recent_scanned[1] += + count[LRU_ACTIVE_FILE]; } spin_unlock_irq(&zone->lru_lock); @@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, add_page_to_lru_list(zone, page, lru); if (PageActive(page) && scan_global_lru(sc)) { int file = !!page_is_file_cache(page); - zone->recent_rotated[file]++; + reclaim_stat->recent_rotated[file]++; } if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); @@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, struct page *page; struct pagevec pvec; enum lru_list lru; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); lru_add_drain(); spin_lock_irq(&zone->lru_lock); @@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, */ if (scan_global_lru(sc)) { zone->pages_scanned += pgscanned; - zone->recent_scanned[!!file] += pgmoved; + reclaim_stat->recent_scanned[!!file] += pgmoved; } if (file) @@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * pages in get_scan_ratio. */ if (scan_global_lru(sc)) - zone->recent_rotated[!!file] += pgmoved; + reclaim_stat->recent_rotated[!!file] += pgmoved; while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); @@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, unsigned long anon, file, free; unsigned long anon_prio, file_prio; unsigned long ap, fp; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); /* If we have no swap space, do not bother scanning anon pages. 
*/ if (nr_swap_pages <= 0) { @@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, * * anon in [0], file in [1] */ - if (unlikely(zone->recent_scanned[0] > anon / 4)) { + if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { spin_lock_irq(&zone->lru_lock); - zone->recent_scanned[0] /= 2; - zone->recent_rotated[0] /= 2; + reclaim_stat->recent_scanned[0] /= 2; + reclaim_stat->recent_rotated[0] /= 2; spin_unlock_irq(&zone->lru_lock); } - if (unlikely(zone->recent_scanned[1] > file / 4)) { + if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { spin_lock_irq(&zone->lru_lock); - zone->recent_scanned[1] /= 2; - zone->recent_rotated[1] /= 2; + reclaim_stat->recent_scanned[1] /= 2; + reclaim_stat->recent_rotated[1] /= 2; spin_unlock_irq(&zone->lru_lock); } @@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, * proportional to the fraction of recently scanned pages on * each list that were recently referenced and in active use. */ - ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); - ap /= zone->recent_rotated[0] + 1; + ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); + ap /= reclaim_stat->recent_rotated[0] + 1; - fp = (file_prio + 1) * (zone->recent_scanned[1] + 1); - fp /= zone->recent_rotated[1] + 1; + fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); + fp /= reclaim_stat->recent_rotated[1] + 1; /* Normalize to percentages */ percent[0] = 100 * ap / (ap + fp + 1); -- cgit v1.2.2 From c9f299d9862deadf9fbee3ca28d915fdb006975a Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:16 -0800 Subject: mm: add zone nr_pages helper function Add zone_nr_pages() helper function. It is used by a later patch. This patch doesn't have any functional change. Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 03ca923c6656..6827d35954fb 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -136,6 +136,13 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, return &zone->reclaim_stat; } +static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc, + enum lru_list lru) +{ + return zone_page_state(zone, NR_LRU_BASE + lru); +} + + /* * Add a shrinker callback to be called from the vm */ @@ -1365,10 +1372,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, return; } - anon = zone_page_state(zone, NR_ACTIVE_ANON) + - zone_page_state(zone, NR_INACTIVE_ANON); - file = zone_page_state(zone, NR_ACTIVE_FILE) + - zone_page_state(zone, NR_INACTIVE_FILE); + anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) + + zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); + file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, force-scan anon pages. */ -- cgit v1.2.2 From eeee9a8cd1e93c8b94e7788790fa9e2f8910c779 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:17 -0800 Subject: mm: make get_scan_ratio() safe for memcg Currently, get_scan_ratio() always calculate the balancing value for global reclaim and memcg reclaim doesn't use it. Therefore it doesn't have scan_global_lru() condition. 
However, we plan to expand get_scan_ratio() to be usable for memcg too, later. This patch therefore moves the global-reclaim-only code in get_scan_ratio() under an explicit scan_global_lru() condition. This patch doesn't have any functional change. Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 6827d35954fb..e2b31a522a66 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1376,13 +1376,16 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); - free = zone_page_state(zone, NR_FREE_PAGES); - /* If we have very few page cache pages, force-scan anon pages. */ - if (unlikely(file + free <= zone->pages_high)) { - percent[0] = 100; - percent[1] = 0; - return; + if (scan_global_lru(sc)) { + free = zone_page_state(zone, NR_FREE_PAGES); + /* If we have very few page cache pages, + force-scan anon pages. */ + if (unlikely(file + free <= zone->pages_high)) { + percent[0] = 100; + percent[1] = 0; + return; + } } /* -- cgit v1.2.2 From 549927620b04a8f8073ce2ee2a8977f209af2ee5 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:18 -0800 Subject: memcg: add null check to page_cgroup_zoneinfo() If CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y, page_cgroup::mem_cgroup can be NULL, so a null check is needed. A later patch uses this function. Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9b4993ee46b3..457d671029b8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -231,6 +231,9 @@ page_cgroup_zoneinfo(struct page_cgroup *pc) int nid = page_cgroup_nid(pc); int zid = page_cgroup_zid(pc); + if (!mem) + return NULL; + return mem_cgroup_zoneinfo(mem, nid, zid); } -- cgit v1.2.2 From 14797e2363c2b2f1ce139fd1c5a215e4e05aa1d9 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:18 -0800 Subject: memcg: add inactive_anon_is_low() inactive_anon_is_low() is a key component of active/inactive anon balancing on reclaim. However, the current inactive_anon_is_low() only considers global reclaim, so we need the following ugly scan_global_lru() condition: if (lru == LRU_ACTIVE_ANON && (!scan_global_lru(sc) || inactive_anon_is_low(zone))) { shrink_active_list(nr_to_scan, zone, sc, priority, file); return 0; This causes memcg reclaim to always deactivate pages when shrink_list() is called. Add mem_cgroup_inactive_anon_is_low() to improve active/inactive anon balancing for memcg.
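To see what the new check does, here is a worked example with illustrative numbers; inactive_ratio is derived below from the memcg's limit, mirroring the per-zone calculation in page_alloc.c:

	/*
	 * Illustrative only: for a 4GB limit, gb = 4 and
	 * inactive_ratio = int_sqrt(10 * 4) = 6.
	 * With, say, 700MB of active anon and 100MB of inactive anon:
	 * 100 * 6 < 700, so the inactive list is "low" and reclaim will
	 * deactivate some active anon pages to rebalance the two lists.
	 */
	if (inactive * memcg->inactive_ratio < active)
		return 1;	/* deactivate more anon pages */
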
Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Cyrill Gorcunov Cc: "Pekka Enberg" Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- mm/vmscan.c | 37 +++++++++++++++++++++++-------------- 2 files changed, 68 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 457d671029b8..6611328460e9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -156,6 +156,9 @@ struct mem_cgroup { unsigned long last_oom_jiffies; int obsolete; atomic_t refcnt; + + unsigned int inactive_ratio; + /* * statistics. This must be placed at the end of memcg. */ @@ -431,6 +434,20 @@ long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, return (nr_pages >> priority); } +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +{ + unsigned long active; + unsigned long inactive; + + inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON); + active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON); + + if (inactive * memcg->inactive_ratio < active) + return 1; + + return 0; +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -1360,6 +1377,29 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } +/* + * The inactive anon list should be small enough that the VM never has to + * do too much work, but large enough that each inactive page has a chance + * to be referenced again before it is swapped out. + * + * this calculation is straightforward porting from + * page_alloc.c::setup_per_zone_inactive_ratio(). + * it describe more detail. + */ +static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg) +{ + unsigned int gb, ratio; + + gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30; + if (gb) + ratio = int_sqrt(10 * gb); + else + ratio = 1; + + memcg->inactive_ratio = ratio; + +} + static DEFINE_MUTEX(set_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, @@ -1398,6 +1438,10 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, GFP_KERNEL, false); if (!progress) retry_count--; } + + if (!ret) + mem_cgroup_set_inactive_ratio(memcg); + return ret; } @@ -1982,7 +2026,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); } - + mem_cgroup_set_inactive_ratio(mem); mem->last_scanned_child = NULL; return &mem->css; diff --git a/mm/vmscan.c b/mm/vmscan.c index e2b31a522a66..b2bc06bffcfb 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1310,14 +1310,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, pagevec_release(&pvec); } -/** - * inactive_anon_is_low - check if anonymous pages need to be deactivated - * @zone: zone to check - * - * Returns true if the zone does not have enough inactive anon pages, - * meaning some active anon pages need to be deactivated. 
- */ -static int inactive_anon_is_low(struct zone *zone) +static int inactive_anon_is_low_global(struct zone *zone) { unsigned long active, inactive; @@ -1330,6 +1323,25 @@ static int inactive_anon_is_low(struct zone *zone) return 0; } +/** + * inactive_anon_is_low - check if anonymous pages need to be deactivated + * @zone: zone to check + * @sc: scan control of this context + * + * Returns true if the zone does not have enough inactive anon pages, + * meaning some active anon pages need to be deactivated. + */ +static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) +{ + int low; + + if (scan_global_lru(sc)) + low = inactive_anon_is_low_global(zone); + else + low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); + return low; +} + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) { @@ -1340,8 +1352,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, return 0; } - if (lru == LRU_ACTIVE_ANON && - (!scan_global_lru(sc) || inactive_anon_is_low(zone))) { + if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) { shrink_active_list(nr_to_scan, zone, sc, priority, file); return 0; } @@ -1509,9 +1520,7 @@ static void shrink_zone(int priority, struct zone *zone, * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. */ - if (!scan_global_lru(sc) || inactive_anon_is_low(zone)) - shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); - else if (!scan_global_lru(sc)) + if (inactive_anon_is_low(zone, sc)) shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); throttle_vm_writeout(sc->gfp_mask); @@ -1807,7 +1816,7 @@ loop_again: * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. */ - if (inactive_anon_is_low(zone)) + if (inactive_anon_is_low(zone, &sc)) shrink_active_list(SWAP_CLUSTER_MAX, zone, &sc, priority, 0); -- cgit v1.2.2 From a3d8e0549d913e30968fa02e505dfe02c0a23e0d Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:19 -0800 Subject: memcg: add mem_cgroup_zone_nr_pages() Introduce mem_cgroup_zone_nr_pages(). It is called by zone_nr_pages() helper function. This patch doesn't have any behavior change. 
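For clarity, this is the helper's final shape in vmscan after this change, consolidated from the hunks below (behavior is unchanged for global reclaim):

	static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
					   enum lru_list lru)
	{
		/* memcg-driven reclaim reads the per-cgroup, per-zone LRU counts */
		if (!scan_global_lru(sc))
			return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
		/* global reclaim keeps using the zone-wide vmstat counters */
		return zone_page_state(zone, NR_LRU_BASE + lru);
	}
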
Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Acked-by: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 12 +++++++++++- mm/vmscan.c | 3 +++ 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6611328460e9..313247e6c503 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -186,7 +186,6 @@ pcg_default_flags[NR_CHARGE_TYPE] = { 0, /* FORCE */ }; - /* for encoding cft->private value on file */ #define _MEM (0) #define _MEMSWAP (1) @@ -448,6 +447,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) return 0; } +unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, + struct zone *zone, + enum lru_list lru) +{ + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); + + return MEM_CGROUP_ZSTAT(mz, lru); +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, diff --git a/mm/vmscan.c b/mm/vmscan.c index b2bc06bffcfb..d958d624d3ae 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -139,6 +139,9 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc, enum lru_list lru) { + if (!scan_global_lru(sc)) + return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); + return zone_page_state(zone, NR_LRU_BASE + lru); } -- cgit v1.2.2 From 3e2f41f1f64744f7942980d93cc93dd3e5924560 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:20 -0800 Subject: memcg: add zone_reclaim_stat Introduce the mem_cgroup_per_zone::reclaim_stat member and its statistics collecting function. Now, get_scan_ratio() can calculate the correct value for memcg reclaim.
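As a quick sanity check of what these per-memcg statistics feed, here is the balancing arithmetic from get_scan_ratio() with illustrative numbers (anon_prio comes from swappiness):

	/*
	 * Illustrative only: anon_prio = 60 (default swappiness),
	 * recent_scanned[0] = 400, recent_rotated[0] = 100.
	 */
	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;	/* 61 * 401 / 101 = 242 */

	/*
	 * The more a list's pages rotate back (recent_rotated rises), the
	 * smaller its ratio, and hence the less scan pressure it gets.
	 */
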
[hugh@veritas.com: avoid reclaim_stat oops when disabled] Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 29 +++++++++++++++++++++++++++++ mm/swap.c | 34 +++++++++++++++++++++++++--------- mm/vmscan.c | 27 +++++++++++++-------------- 3 files changed, 67 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 313247e6c503..7b7f4dc05035 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -103,6 +103,8 @@ struct mem_cgroup_per_zone { */ struct list_head lists[NR_LRU_LISTS]; unsigned long count[NR_LRU_LISTS]; + + struct zone_reclaim_stat reclaim_stat; }; /* Macro for accessing counter */ #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) @@ -458,6 +460,33 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, return MEM_CGROUP_ZSTAT(mz, lru); } +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone) +{ + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); + + return &mz->reclaim_stat; +} + +struct zone_reclaim_stat * +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + struct page_cgroup *pc; + struct mem_cgroup_per_zone *mz; + + if (mem_cgroup_disabled()) + return NULL; + + pc = lookup_page_cgroup(page); + mz = page_cgroup_zoneinfo(pc); + if (!mz) + return NULL; + + return &mz->reclaim_stat; +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, diff --git a/mm/swap.c b/mm/swap.c index 26b07e7bc3d3..8adb9feb61e1 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -151,13 +151,32 @@ void rotate_reclaimable_page(struct page *page) } } +static void update_page_reclaim_stat(struct zone *zone, struct page *page, + int file, int rotated) +{ + struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; + struct zone_reclaim_stat *memcg_reclaim_stat; + + memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page); + + reclaim_stat->recent_scanned[file]++; + if (rotated) + reclaim_stat->recent_rotated[file]++; + + if (!memcg_reclaim_stat) + return; + + memcg_reclaim_stat->recent_scanned[file]++; + if (rotated) + memcg_reclaim_stat->recent_rotated[file]++; +} + /* * FIXME: speed this up? 
*/ void activate_page(struct page *page) { struct zone *zone = page_zone(page); - struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { @@ -170,8 +189,7 @@ void activate_page(struct page *page) add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - reclaim_stat->recent_rotated[!!file]++; - reclaim_stat->recent_scanned[!!file]++; + update_page_reclaim_stat(zone, page, !!file, 1); } spin_unlock_irq(&zone->lru_lock); } @@ -386,7 +404,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { int i; struct zone *zone = NULL; - struct zone_reclaim_stat *reclaim_stat = NULL; VM_BUG_ON(is_unevictable_lru(lru)); @@ -394,24 +411,23 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); int file; + int active; if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; - reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); } VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); + active = is_active_lru(lru); file = is_file_lru(lru); - reclaim_stat->recent_scanned[file]++; - if (is_active_lru(lru)) { + if (active) SetPageActive(page); - reclaim_stat->recent_rotated[file]++; - } + update_page_reclaim_stat(zone, page, file, active); add_page_to_lru_list(zone, page, lru); } if (zone) diff --git a/mm/vmscan.c b/mm/vmscan.c index d958d624d3ae..56fc7abe4d23 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -133,6 +133,9 @@ static DECLARE_RWSEM(shrinker_rwsem); static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, struct scan_control *sc) { + if (!scan_global_lru(sc)) + return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); + return &zone->reclaim_stat; } @@ -1087,17 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_INACTIVE_ANON, -count[LRU_INACTIVE_ANON]); - if (scan_global_lru(sc)) { + if (scan_global_lru(sc)) zone->pages_scanned += nr_scan; - reclaim_stat->recent_scanned[0] += - count[LRU_INACTIVE_ANON]; - reclaim_stat->recent_scanned[0] += - count[LRU_ACTIVE_ANON]; - reclaim_stat->recent_scanned[1] += - count[LRU_INACTIVE_FILE]; - reclaim_stat->recent_scanned[1] += - count[LRU_ACTIVE_FILE]; - } + + reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; + reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; + reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE]; + reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE]; + spin_unlock_irq(&zone->lru_lock); nr_scanned += nr_scan; @@ -1155,7 +1155,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(zone, page, lru); - if (PageActive(page) && scan_global_lru(sc)) { + if (PageActive(page)) { int file = !!page_is_file_cache(page); reclaim_stat->recent_rotated[file]++; } @@ -1230,8 +1230,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, */ if (scan_global_lru(sc)) { zone->pages_scanned += pgscanned; - reclaim_stat->recent_scanned[!!file] += pgmoved; } + reclaim_stat->recent_scanned[!!file] += pgmoved; if (file) __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); @@ -1272,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * This helps balance scan pressure between file and anonymous * pages in 
get_scan_ratio. */ - if (scan_global_lru(sc)) - reclaim_stat->recent_rotated[!!file] += pgmoved; + reclaim_stat->recent_rotated[!!file] += pgmoved; while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); -- cgit v1.2.2 From 9439c1c95b5c25b8031b2a7eb7e1590eb84be7f5 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:21 -0800 Subject: memcg: remove mem_cgroup_calc_reclaim() Now that get_scan_ratio() returns the correct value during memcg reclaim, mem_cgroup_calc_reclaim() can be removed. With this, memcg reclaim gains the same anon/file reclaim-balancing capability as global reclaim. Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 21 --------------------- mm/vmscan.c | 27 ++++++++++----------------- 2 files changed, 10 insertions(+), 38 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7b7f4dc05035..b8c1e5acc25a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) mem->prev_priority = priority; } -/* - * Calculate # of pages to be scanned in this priority/zone. - * See also vmscan.c - * - * priority starts from "DEF_PRIORITY" and decremented in each loop. - * (see include/linux/mmzone.h) - */ - -long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, - int priority, enum lru_list lru) -{ - long nr_pages; - int nid = zone->zone_pgdat->node_id; - int zid = zone_idx(zone); - struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); - - nr_pages = MEM_CGROUP_ZSTAT(mz, lru); - - return (nr_pages >> priority); -} - int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) { unsigned long active; diff --git a/mm/vmscan.c b/mm/vmscan.c index 56fc7abe4d23..66bb6ef44b5f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone, get_scan_ratio(zone, sc, percent); for_each_evictable_lru(l) { - if (scan_global_lru(sc)) { - int file = is_file_lru(l); - int scan; + int file = is_file_lru(l); + int scan; - scan = zone_page_state(zone, NR_LRU_BASE + l); - if (priority) { - scan >>= priority; - scan = (scan * percent[file]) / 100; - } + scan = zone_page_state(zone, NR_LRU_BASE + l); + if (priority) { + scan >>= priority; + scan = (scan * percent[file]) / 100; + } + if (scan_global_lru(sc)) { zone->lru[l].nr_scan += scan; nr[l] = zone->lru[l].nr_scan; if (nr[l] >= swap_cluster_max) zone->lru[l].nr_scan = 0; else nr[l] = 0; - } else { - /* - * This reclaim occurs not because zone memory shortage - * but because memory controller hits its limit. - * Don't modify zone reclaim related data. 
- */ - nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone, - priority, l); - } + } else + nr[l] = scan; } while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || -- cgit v1.2.2 From 7f016ee8b6a9a43f768e6252021f169abec4fa1f Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:22 -0800 Subject: memcg: show reclaim stat Add the following four fields to memory.stat file: - inactive_ratio - recent_rotated_anon - recent_rotated_file - recent_scanned_anon - recent_scanned_file Acked-by: Rik van Riel Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b8c1e5acc25a..af28e128b749 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1816,6 +1816,36 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, cb->fill(cb, "unevictable", unevictable * PAGE_SIZE); } + +#ifdef CONFIG_DEBUG_VM + cb->fill(cb, "inactive_ratio", mem_cont->inactive_ratio); + + { + int nid, zid; + struct mem_cgroup_per_zone *mz; + unsigned long recent_rotated[2] = {0, 0}; + unsigned long recent_scanned[2] = {0, 0}; + + for_each_online_node(nid) + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); + + recent_rotated[0] += + mz->reclaim_stat.recent_rotated[0]; + recent_rotated[1] += + mz->reclaim_stat.recent_rotated[1]; + recent_scanned[0] += + mz->reclaim_stat.recent_scanned[0]; + recent_scanned[1] += + mz->reclaim_stat.recent_scanned[1]; + } + cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); + cb->fill(cb, "recent_rotated_file", recent_rotated[1]); + cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); + cb->fill(cb, "recent_scanned_file", recent_scanned[1]); + } +#endif + return 0; } -- cgit v1.2.2 From e72e2bd6747c7a5c432197b6614cf3a387e61a0e Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:23 -0800 Subject: memcg: rename scan global lru Rename scan_global_lru() to scanning_global_lru(). 
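The predicate itself is unchanged by the rename; for reference, its definition as it appears in the diff below:

	#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
	#else
	#define scanning_global_lru(sc)	(1)
	#endif

A NULL sc->mem_cgroup means global reclaim; a non-NULL one means the scan was triggered by a memory cgroup hitting its limit. The new name makes that question read naturally at call sites.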
Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 66bb6ef44b5f..f03c239440ad 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -125,15 +125,15 @@ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_CGROUP_MEM_RES_CTLR -#define scan_global_lru(sc) (!(sc)->mem_cgroup) +#define scanning_global_lru(sc) (!(sc)->mem_cgroup) #else -#define scan_global_lru(sc) (1) +#define scanning_global_lru(sc) (1) #endif static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, struct scan_control *sc) { - if (!scan_global_lru(sc)) + if (!scanning_global_lru(sc)) return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); return &zone->reclaim_stat; @@ -142,7 +142,7 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc, enum lru_list lru) { - if (!scan_global_lru(sc)) + if (!scanning_global_lru(sc)) return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); return zone_page_state(zone, NR_LRU_BASE + lru); @@ -1090,7 +1090,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_INACTIVE_ANON, -count[LRU_INACTIVE_ANON]); - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) zone->pages_scanned += nr_scan; reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; @@ -1129,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, if (current_is_kswapd()) { __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); __count_vm_events(KSWAPD_STEAL, nr_freed); - } else if (scan_global_lru(sc)) + } else if (scanning_global_lru(sc)) __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); __count_zone_vm_events(PGSTEAL, zone, nr_freed); @@ -1228,7 +1228,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * zone->pages_scanned is used for detect zone's oom * mem_cgroup remembers nr_scan by itself. */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { zone->pages_scanned += pgscanned; } reclaim_stat->recent_scanned[!!file] += pgmoved; @@ -1337,7 +1337,7 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) { int low; - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) low = inactive_anon_is_low_global(zone); else low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); @@ -1390,7 +1390,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, force-scan anon pages. */ @@ -1474,7 +1474,7 @@ static void shrink_zone(int priority, struct zone *zone, scan >>= priority; scan = (scan * percent[file]) / 100; } - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { zone->lru[l].nr_scan += scan; nr[l] = zone->lru[l].nr_scan; if (nr[l] >= swap_cluster_max) @@ -1550,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist, * Take care memory controller reclaiming has small influence * to global LRU. 
*/ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; note_zone_scanning_priority(zone, priority); @@ -1603,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, delayacct_freepages_start(); - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) count_vm_event(ALLOCSTALL); /* * mem_cgroup will not do shrink_slab. */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) @@ -1627,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, * Don't shrink slabs when reclaiming memory from * over limit cgroups */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); if (reclaim_state) { sc->nr_reclaimed += reclaim_state->reclaimed_slab; @@ -1658,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, congestion_wait(WRITE, HZ/10); } /* top priority shrink_zones still had more to do? don't OOM, then */ - if (!sc->all_unreclaimable && scan_global_lru(sc)) + if (!sc->all_unreclaimable && scanning_global_lru(sc)) ret = sc->nr_reclaimed; out: /* @@ -1671,7 +1671,7 @@ out: if (priority < 0) priority = 0; - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) -- cgit v1.2.2 From 2733c06ac864ed40b9dfbbd5270f3f16949bd4a1 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:23 -0800 Subject: memcg: protect prev_priority Currently, mem_cgroup doesn't have its own lock, and most of its members don't need one (e.g. mem_cgroup->info is protected by the zone lock, and mem_cgroup->stat is a per-cpu variable). However, there is one explicit exception: mem_cgroup->prev_priority needs a lock but is not protected by one. Luckily, this is NOT a bug at the moment, because prev_priority isn't used by the current reclaim code. But we plan to use prev_priority again in the future, so it is better to fix this now. In addition, we plan to reuse this lock for other members, so the name "reclaim_param_lock" is better than "prev_priority_lock". Acked-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index af28e128b749..027c0dd7a83e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -144,6 +144,11 @@ struct mem_cgroup { */ struct mem_cgroup_lru_info info; + /* + protects reclaim-related members. 
+ */ + spinlock_t reclaim_param_lock; + int prev_priority; /* for recording reclaim priority */ /* @@ -400,18 +405,28 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) */ int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) { - return mem->prev_priority; + int prev_priority; + + spin_lock(&mem->reclaim_param_lock); + prev_priority = mem->prev_priority; + spin_unlock(&mem->reclaim_param_lock); + + return prev_priority; } void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority) { + spin_lock(&mem->reclaim_param_lock); if (priority < mem->prev_priority) mem->prev_priority = priority; + spin_unlock(&mem->reclaim_param_lock); } void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) { + spin_lock(&mem->reclaim_param_lock); mem->prev_priority = priority; + spin_unlock(&mem->reclaim_param_lock); } int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) @@ -2076,6 +2091,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) } mem_cgroup_set_inactive_ratio(mem); mem->last_scanned_child = NULL; + spin_lock_init(&mem->reclaim_param_lock); return &mem->css; free_out: -- cgit v1.2.2 From a7885eb8ad465ec9db99ac5b5e6680f0ca8e11c8 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:24 -0800 Subject: memcg: swappiness Currently, /proc/sys/vm/swappiness can change swappiness ratio for global reclaim. However, memcg reclaim doesn't have tuning parameter for itself. In general, the optimal swappiness depend on workload. (e.g. hpc workload need to low swappiness than the others.) Then, per cgroup swappiness improve administrator tunability. Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++------ mm/vmscan.c | 7 +++--- 2 files changed, 75 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 027c0dd7a83e..ab2ecbb95b8d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -164,6 +164,9 @@ struct mem_cgroup { int obsolete; atomic_t refcnt; + unsigned int swappiness; + + unsigned int inactive_ratio; /* @@ -636,6 +639,22 @@ static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) return false; } +static unsigned int get_swappiness(struct mem_cgroup *memcg) +{ + struct cgroup *cgrp = memcg->css.cgroup; + unsigned int swappiness; + + /* root ? */ + if (cgrp->parent == NULL) + return vm_swappiness; + + spin_lock(&memcg->reclaim_param_lock); + swappiness = memcg->swappiness; + spin_unlock(&memcg->reclaim_param_lock); + + return swappiness; +} + /* * Dance down the hierarchy if needed to reclaim memory. We remember the * last child we reclaimed from, so that we don't end up penalizing @@ -656,7 +675,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, * but there might be left over accounting, even after children * have left. 
*/ - ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap); + ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap, + get_swappiness(root_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; if (!root_mem->use_hierarchy) @@ -672,7 +692,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, cgroup_unlock(); continue; } - ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap); + ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, + get_swappiness(next_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; cgroup_lock(); @@ -1400,7 +1421,8 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) rcu_read_unlock(); do { - progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true); + progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true, + get_swappiness(mem)); progress += mem_cgroup_check_under_limit(mem); } while (!progress && --retry); @@ -1468,7 +1490,9 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, break; progress = try_to_free_mem_cgroup_pages(memcg, - GFP_KERNEL, false); + GFP_KERNEL, + false, + get_swappiness(memcg)); if (!progress) retry_count--; } @@ -1512,7 +1536,8 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, break; oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true); + try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true, + get_swappiness(memcg)); curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); if (curusage >= oldusage) retry_count--; @@ -1643,8 +1668,8 @@ try_to_free: ret = -EINTR; goto out; } - progress = try_to_free_mem_cgroup_pages(mem, - GFP_KERNEL, false); + progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, + false, get_swappiness(mem)); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -1864,6 +1889,37 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, return 0; } +static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + + return get_swappiness(memcg); +} + +static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, + u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup *parent; + if (val > 100) + return -EINVAL; + + if (cgrp->parent == NULL) + return -EINVAL; + + parent = mem_cgroup_from_cont(cgrp->parent); + /* If under hierarchy, only empty-root can set this value */ + if ((parent->use_hierarchy) || + (memcg->use_hierarchy && !list_empty(&cgrp->children))) + return -EINVAL; + + spin_lock(&memcg->reclaim_param_lock); + memcg->swappiness = val; + spin_unlock(&memcg->reclaim_param_lock); + + return 0; +} + static struct cftype mem_cgroup_files[] = { { @@ -1902,6 +1958,11 @@ static struct cftype mem_cgroup_files[] = { .write_u64 = mem_cgroup_hierarchy_write, .read_u64 = mem_cgroup_hierarchy_read, }, + { + .name = "swappiness", + .read_u64 = mem_cgroup_swappiness_read, + .write_u64 = mem_cgroup_swappiness_write, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -2093,6 +2154,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) mem->last_scanned_child = NULL; spin_lock_init(&mem->reclaim_param_lock); + if (parent) + mem->swappiness = get_swappiness(parent); + return &mem->css; free_out: for_each_node_state(node, N_POSSIBLE) diff --git a/mm/vmscan.c b/mm/vmscan.c index f03c239440ad..ece2f405187f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1707,14 +1707,15 @@ unsigned 
long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, - gfp_t gfp_mask, - bool noswap) + gfp_t gfp_mask, + bool noswap, + unsigned int swappiness) { struct scan_control sc = { .may_writepage = !laptop_mode, .may_swap = 1, .swap_cluster_max = SWAP_CLUSTER_MAX, - .swappiness = vm_swappiness, + .swappiness = swappiness, .order = 0, .mem_cgroup = mem_cont, .isolate_pages = mem_cgroup_isolate_pages, -- cgit v1.2.2 From c772be939e078afd2505ede7d596a30f8f61de95 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:25 -0800 Subject: memcg: fix calculation of active_ratio Currently, the inactive_ratio of a memcg is calculated when the limit is set, because page_alloc.c does the same and the first implementation was a straightforward port. However, memcg recently gained the hierarchy feature. Under a hierarchy, the effective memory limit is decided not only by the current cgroup's memory.limit_in_bytes but also by parent limits and sibling memory usage, so the optimal inactive_ratio changes frequently. Calculating it every time it is needed is therefore better. Tested-by: KAMEZAWA Hiroyuki Acked-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 64 ++++++++++++++++++++++++++++----------------------------- mm/vmscan.c | 2 +- 2 files changed, 32 insertions(+), 34 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ab2ecbb95b8d..c7d78eca8363 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -166,9 +166,6 @@ struct mem_cgroup { unsigned int swappiness; - - unsigned int inactive_ratio; - /* * statistics. This must be placed at the end of memcg. */ @@ -432,15 +429,43 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) spin_unlock(&mem->reclaim_param_lock); } -int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) { unsigned long active; unsigned long inactive; + unsigned long gb; + unsigned long inactive_ratio; inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON); active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON); - if (inactive * memcg->inactive_ratio < active) + gb = (inactive + active) >> (30 - PAGE_SHIFT); + if (gb) + inactive_ratio = int_sqrt(10 * gb); + else + inactive_ratio = 1; + + if (present_pages) { + present_pages[0] = inactive; + present_pages[1] = active; + } + + return inactive_ratio; +} + +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) +{ + unsigned long active; + unsigned long inactive; + unsigned long present_pages[2]; + unsigned long inactive_ratio; + + inactive_ratio = calc_inactive_ratio(memcg, present_pages); + + inactive = present_pages[0]; + active = present_pages[1]; + + if (inactive * inactive_ratio < active) return 1; return 0; @@ -1432,29 +1457,6 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } -/* - * The inactive anon list should be small enough that the VM never has to - * do too much work, but large enough that each inactive page has a chance - * to be referenced again before it is swapped out. - * - * this calculation is straightforward porting from - * page_alloc.c::setup_per_zone_inactive_ratio(). - * it describe more detail. 
- */ -static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg) -{ - unsigned int gb, ratio; - - gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30; - if (gb) - ratio = int_sqrt(10 * gb); - else - ratio = 1; - - memcg->inactive_ratio = ratio; - -} - static DEFINE_MUTEX(set_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, @@ -1496,9 +1498,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!progress) retry_count--; } - if (!ret) - mem_cgroup_set_inactive_ratio(memcg); - return ret; } @@ -1858,7 +1857,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, } #ifdef CONFIG_DEBUG_VM - cb->fill(cb, "inactive_ratio", mem_cont->inactive_ratio); + cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); { int nid, zid; @@ -2150,7 +2149,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); } - mem_cgroup_set_inactive_ratio(mem); mem->last_scanned_child = NULL; spin_lock_init(&mem->reclaim_param_lock); diff --git a/mm/vmscan.c b/mm/vmscan.c index ece2f405187f..9a27c44aa327 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1340,7 +1340,7 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) if (scanning_global_lru(sc)) low = inactive_anon_is_low_global(zone); else - low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); + low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); return low; } -- cgit v1.2.2 From fee7b548e6f2bd4bfd03a1a45d3afd593de7d5e9 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:26 -0800 Subject: memcg: show real limit under hierarchy mode Show "real" limit of memcg. This helps my debugging and maybe useful for users. While testing hierarchy like this mount -t cgroup none /cgroup -t memory mkdir /cgroup/A set use_hierarchy==1 to "A" mkdir /cgroup/A/01 mkdir /cgroup/A/01/02 mkdir /cgroup/A/01/03 mkdir /cgroup/A/01/03/04 mkdir /cgroup/A/08 mkdir /cgroup/A/08/01 .... and set each own limit to them, "real" limit of each memcg is unclear. This patch shows real limit by checking all ancestors. 
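As an illustration, after this patch memory.stat reports entries like the following (the values shown are hypothetical, and hierarchical_memsw_limit appears only when swap accounting is enabled):

	# cat /cgroup/A/01/03/04/memory.stat
	...
	hierarchical_memory_limit 268435456
	hierarchical_memsw_limit 536870912

Each value is the minimum limit found while walking from the cgroup up through its use_hierarchy ancestors.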
Changelog: (v1) -> (v2) - remove "if" and use "min(a,b)" Acked-by: Balbir Singh Signed-off-by: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c7d78eca8363..8d2e5c8a25b1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1782,6 +1782,34 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, return ret; } +static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, + unsigned long long *mem_limit, unsigned long long *memsw_limit) +{ + struct cgroup *cgroup; + unsigned long long min_limit, min_memsw_limit, tmp; + + min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); + min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + cgroup = memcg->css.cgroup; + if (!memcg->use_hierarchy) + goto out; + + while (cgroup->parent) { + cgroup = cgroup->parent; + memcg = mem_cgroup_from_cont(cgroup); + if (!memcg->use_hierarchy) + break; + tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); + min_limit = min(min_limit, tmp); + tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + min_memsw_limit = min(min_memsw_limit, tmp); + } +out: + *mem_limit = min_limit; + *memsw_limit = min_memsw_limit; + return; +} + static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) { struct mem_cgroup *mem; @@ -1855,6 +1883,13 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, cb->fill(cb, "unevictable", unevictable * PAGE_SIZE); } + { + unsigned long long limit, memsw_limit; + memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); + cb->fill(cb, "hierarchical_memory_limit", limit); + if (do_swap_account) + cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); + } #ifdef CONFIG_DEBUG_VM cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); -- cgit v1.2.2 From 3bb4edf24b26358eccfc69ac8b9a9c36ccc312da Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:28 -0800 Subject: memcg: don't trigger oom at page migration I think triggering OOM at mem_cgroup_prepare_migration would be just a bit overkill. Returning -ENOMEM would be enough for mem_cgroup_prepare_migration. The caller would handle the case anyway. Signed-off-by: Daisuke Nishimura Acked-by: KAMEZAWA Hiroyuki Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8d2e5c8a25b1..3ba72e6556cc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1361,7 +1361,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) unlock_page_cgroup(pc); if (mem) { - ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem); + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); css_put(&mem->css); } *ptr = mem; -- cgit v1.2.2 From a5e924f5f8abf97944e625d74967cc9452cfbce8 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:28 -0800 Subject: memcg: remove mem_cgroup_try_charge After previous patch, mem_cgroup_try_charge is not used by anyone, so we can remove it. 
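With this removal, callers use the two-phase protocol built on __mem_cgroup_try_charge(). A condensed sketch of the swap-in case follows (a hypothetical caller; error handling trimmed); the function names are the ones used elsewhere in this series:

	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return VM_FAULT_OOM;			/* charge failed */
	/* ... map the page ... */
	mem_cgroup_commit_charge_swapin(page, ptr);	/* success path */
	/* on an error path instead: mem_cgroup_cancel_charge_swapin(ptr); */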
Signed-off-by: Daisuke Nishimura Acked-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3ba72e6556cc..435f08dac8bf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -839,27 +839,8 @@ nomem: return -ENOMEM; } -/** - * mem_cgroup_try_charge - get charge of PAGE_SIZE. - * @mm: an mm_struct which is charged against. (when *memcg is NULL) - * @gfp_mask: gfp_mask for reclaim. - * @memcg: a pointer to memory cgroup which is charged against. - * - * charge against memory cgroup pointed by *memcg. if *memcg == NULL, estimated - * memory cgroup from @mm is got and stored in *memcg. - * - * Returns 0 if success. -ENOMEM at failure. - * This call can invoke OOM-Killer. - */ - -int mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t mask, struct mem_cgroup **memcg) -{ - return __mem_cgroup_try_charge(mm, mask, memcg, true); -} - /* - * commit a charge got by mem_cgroup_try_charge() and makes page_cgroup to be + * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be * USED state. If already USED, uncharge and return. */ -- cgit v1.2.2 From 7f4d454dee2e0bdd21bafd413d1c53e443a26540 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:29 -0800 Subject: memcg: avoid deadlock caused by race between oom and cpuset_attach mpol_rebind_mm(), which can be called from cpuset_attach(), does down_write(mm->mmap_sem). This means down_write(mm->mmap_sem) can be called under cgroup_mutex. OTOH, page fault path does down_read(mm->mmap_sem) and calls mem_cgroup_try_charge_xxx(), which may eventually calls mem_cgroup_out_of_memory(). And mem_cgroup_out_of_memory() calls cgroup_lock(). This means cgroup_lock() can be called under down_read(mm->mmap_sem). If those two paths race, deadlock can happen. This patch avoid this deadlock by: - remove cgroup_lock() from mem_cgroup_out_of_memory(). - define new mutex (memcg_tasklist) and serialize mem_cgroup_move_task() (->attach handler of memory cgroup) and mem_cgroup_out_of_memory. Signed-off-by: Daisuke Nishimura Reviewed-by: KAMEZAWA Hiroyuki Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 5 +++++ mm/oom_kill.c | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 435f08dac8bf..861037070f66 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -51,6 +51,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/ #define do_swap_account (0) #endif +static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */ /* * Statistics for memory cgroup. @@ -827,7 +828,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, if (!nr_retries--) { if (oom) { + mutex_lock(&memcg_tasklist); mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); + mutex_unlock(&memcg_tasklist); mem_over_limit->last_oom_jiffies = jiffies; } goto nomem; @@ -2211,10 +2214,12 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, struct cgroup *old_cont, struct task_struct *p) { + mutex_lock(&memcg_tasklist); /* * FIXME: It's better to move charges of this process from old * memcg to new memcg. But it's just on TODO-List now. 
*/ + mutex_unlock(&memcg_tasklist); } struct cgroup_subsys mem_cgroup_subsys = { diff --git a/mm/oom_kill.c b/mm/oom_kill.c index fd150e3a2567..40ba05061a4f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -429,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) unsigned long points = 0; struct task_struct *p; - cgroup_lock(); read_lock(&tasklist_lock); retry: p = select_bad_process(&points, mem); @@ -444,7 +443,6 @@ retry: goto retry; out: read_unlock(&tasklist_lock); - cgroup_unlock(); } #endif -- cgit v1.2.2 From 42e9abb628def2c335a4ecf130bb6c88d916d885 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Wed, 7 Jan 2009 18:08:30 -0800 Subject: memcg: change try_to_free_pages to hierarchical_reclaim mem_cgroup_hierarchical_reclaim() now works properly even when !use_hierarchy (by memcg-hierarchy-avoid-unnecessary-reclaim.patch), so it should be used instead of try_to_free_mem_cgroup_pages() in most cases. The only exception is force_empty, where the group has no children. Signed-off-by: Daisuke Nishimura Acked-by: KAMEZAWA Hiroyuki Acked-by: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 861037070f66..a7ecf23150c5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1430,8 +1430,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) rcu_read_unlock(); do { - progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true, - get_swappiness(mem)); + progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true); progress += mem_cgroup_check_under_limit(mem); } while (!progress && --retry); @@ -1475,10 +1474,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!ret) break; - progress = try_to_free_mem_cgroup_pages(memcg, - GFP_KERNEL, - false, - get_swappiness(memcg)); + progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, + false); if (!progress) retry_count--; } @@ -1519,8 +1516,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, break; oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true, - get_swappiness(memcg)); + mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true); curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); if (curusage >= oldusage) retry_count--; -- cgit v1.2.2 From 03f3c433648a97ae7c86be789edba67690f6ea60 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:31 -0800 Subject: memcg: fix swap accounting leak Fix the swap-in charge operation of memcg. memcg now has hooks into the swap-out operation and checks whether a SwapCache page is really unused. That check depends on the contents of struct page, i.e. if PageAnon(page) && page_mapped(page), the page is recognized as still in use. Now, reuse_swap_page() calls delete_from_swap_cache() before any rmap is established. Then, in the following sequence (page fault with WRITE): try_charge() (charge += PAGESIZE); commit_charge() (check whether the page_cgroup is used or not); reuse_swap_page() -> delete_from_swap_cache() -> mem_cgroup_uncharge_swapcache() (charge -= PAGESIZE) ...... the new charge is uncharged soon.... To avoid this, move commit_charge() to after page_mapcount() goes up to 1. By this: try_charge() (usage += PAGESIZE); reuse_swap_page() (may do usage -= PAGESIZE if PCG_USED is set); commit_charge() (if the page_cgroup is not marked PCG_USED, add the new charge). Accounting will be correct. 
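The resulting order in do_swap_page(), condensed from the diff below:

	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && reuse_swap_page(page))	/* may delete_from_swap_cache() */
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);
	mem_cgroup_commit_charge_swapin(page, ptr);	/* commit after rmap is established */
	swap_free(entry);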
Changelog (v2) -> (v3) - fixed invalid charge to swp_entry==0. - updated documentation. Changelog (v1) -> (v2) - fixed comment. [nishimura@mxp.nes.nec.co.jp: swap accounting leak doc fix] Signed-off-by: KAMEZAWA Hiroyuki Acked-by: Balbir Singh Tested-by: Balbir Singh Cc: Hugh Dickins Cc: Daisuke Nishimura Signed-off-by: Daisuke Nishimura Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 7 ++++--- mm/memory.c | 11 ++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a7ecf23150c5..0ed61e27d526 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1169,10 +1169,11 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) /* * Now swap is on-memory. This means this page may be * counted both as mem and swap....double count. - * Fix it by uncharging from memsw. This SwapCache is stable - * because we're still under lock_page(). + * Fix it by uncharging from memsw. Basically, this SwapCache is stable + * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() + * may call delete_from_swap_cache() before reach here. */ - if (do_swap_account) { + if (do_swap_account && PageSwapCache(page)) { swp_entry_t ent = {.val = page_private(page)}; struct mem_cgroup *memcg; memcg = swap_cgroup_record(ent, NULL); diff --git a/mm/memory.c b/mm/memory.c index e5bfbe6b594c..e009ce870859 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2457,22 +2457,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, * while the page is counted on swap but not yet in mapcount i.e. * before page_add_anon_rmap() and swap_free(); try_to_free_swap() * must be called after the swap_free(), or it will never succeed. - * And mem_cgroup_commit_charge_swapin(), which uses the swp_entry - * in page->private, must be called before reuse_swap_page(), - * which may delete_from_swap_cache(). + * Because delete_from_swap_page() may be called by reuse_swap_page(), + * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry + * in page->private. In this case, a record in swap_cgroup is silently + * discarded at swap_free(). */ - mem_cgroup_commit_charge_swapin(page, ptr); inc_mm_counter(mm, anon_rss); pte = mk_pte(page, vma->vm_page_prot); if (write_access && reuse_swap_page(page)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); write_access = 0; } - flush_icache_page(vma, page); set_pte_at(mm, address, page_table, pte); page_add_anon_rmap(page, vma, address); + /* It's better to call commit-charge after rmap is established */ + mem_cgroup_commit_charge_swapin(page, ptr); swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) -- cgit v1.2.2 From a7ba0eef3af51cd1b6fc4028e4705b3ea2ea9469 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:32 -0800 Subject: memcg: fix double free and make refcnt sane 1. Fix double-free BUG in error route of mem_cgroup_create(). mem_cgroup_free() itself frees per-zone-info. 2. Making refcnt of memcg simple. Add 1 refcnt at creation and call free when refcnt goes down to 0. 
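The resulting life cycle is the conventional one; a sketch matching the diff below:

	atomic_set(&mem->refcnt, 1);			/* at creation */

	static void mem_cgroup_put(struct mem_cgroup *mem)
	{
		if (atomic_dec_and_test(&mem->refcnt))
			__mem_cgroup_free(mem);		/* last reference frees */
	}

cgroup destruction drops the initial reference via mem_cgroup_put(); references held on behalf of swap entries keep the structure alive until they too are dropped.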
Reviewed-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Paul Menage Cc: Daisuke Nishimura Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0ed61e27d526..4f9a9c5a02e2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2092,14 +2092,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void) * Removal of cgroup itself succeeds regardless of refs from swap. */ -static void mem_cgroup_free(struct mem_cgroup *mem) +static void __mem_cgroup_free(struct mem_cgroup *mem) { int node; - if (atomic_read(&mem->refcnt) > 0) - return; - - for_each_node_state(node, N_POSSIBLE) free_mem_cgroup_per_zone_info(mem, node); @@ -2116,11 +2112,8 @@ static void mem_cgroup_get(struct mem_cgroup *mem) static void mem_cgroup_put(struct mem_cgroup *mem) { - if (atomic_dec_and_test(&mem->refcnt)) { - if (!mem->obsolete) - return; - mem_cgroup_free(mem); - } + if (atomic_dec_and_test(&mem->refcnt)) + __mem_cgroup_free(mem); } @@ -2170,12 +2163,10 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (parent) mem->swappiness = get_swappiness(parent); - + atomic_set(&mem->refcnt, 1); return &mem->css; free_out: - for_each_node_state(node, N_POSSIBLE) - free_mem_cgroup_per_zone_info(mem, node); - mem_cgroup_free(mem); + __mem_cgroup_free(mem); return ERR_PTR(-ENOMEM); } @@ -2190,7 +2181,7 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - mem_cgroup_free(mem_cgroup_from_cont(cont)); + mem_cgroup_put(mem_cgroup_from_cont(cont)); } static int mem_cgroup_populate(struct cgroup_subsys *ss, -- cgit v1.2.2 From 54595fe2652f04dc8f5b985312c7cef5aa7bf722 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:33 -0800 Subject: memcg: use css_tryget in memcg From:KAMEZAWA Hiroyuki css_tryget() newly is added and we can know css is alive or not and get refcnt of css in very safe way. ("alive" here means "rmdir/destroy" is not called.) This patch replaces css_get() to css_tryget(), where I cannot explain why css_get() is safe. And removes memcg->obsolete flag. Reviewed-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Paul Menage Cc: Daisuke Nishimura Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 98 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 36 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4f9a9c5a02e2..b311f19bbe01 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -162,7 +162,6 @@ struct mem_cgroup { */ bool use_hierarchy; unsigned long last_oom_jiffies; - int obsolete; atomic_t refcnt; unsigned int swappiness; @@ -283,6 +282,31 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) struct mem_cgroup, css); } +static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) +{ + struct mem_cgroup *mem = NULL; + /* + * Because we have no locks, mm->owner's may be being moved to other + * cgroup. We use css_tryget() here even if this looks + * pessimistic (rather than adding locks here). 
+ */ + rcu_read_lock(); + do { + mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!mem)) + break; + } while (!css_tryget(&mem->css)); + rcu_read_unlock(); + return mem; +} + +static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem) +{ + if (!mem) + return true; + return css_is_removed(&mem->css); +} + /* * Following LRU functions are allowed to be used without PCG_LOCK. * Operations are called by routine of global LRU independently from memcg. @@ -622,8 +646,9 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem) { struct cgroup *cgroup; struct mem_cgroup *ret; - bool obsolete = (root_mem->last_scanned_child && - root_mem->last_scanned_child->obsolete); + bool obsolete; + + obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child); /* * Scan all children under the mem_cgroup mem @@ -636,7 +661,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem) if (!root_mem->last_scanned_child || obsolete) { - if (obsolete) + if (obsolete && root_mem->last_scanned_child) mem_cgroup_put(root_mem->last_scanned_child); cgroup = list_first_entry(&root_mem->css.cgroup->children, @@ -711,7 +736,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, next_mem = mem_cgroup_get_first_node(root_mem); while (next_mem != root_mem) { - if (next_mem->obsolete) { + if (mem_cgroup_is_obsolete(next_mem)) { mem_cgroup_put(next_mem); cgroup_lock(); next_mem = mem_cgroup_get_first_node(root_mem); @@ -769,23 +794,17 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, * thread group leader migrates. It's possible that mm is not * set, if so charge the init_mm (happens for pagecache usage). */ - if (likely(!*memcg)) { - rcu_read_lock(); - mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (unlikely(!mem)) { - rcu_read_unlock(); - return 0; - } - /* - * For every charge from the cgroup, increment reference count - */ - css_get(&mem->css); + mem = *memcg; + if (likely(!mem)) { + mem = try_get_mem_cgroup_from_mm(mm); *memcg = mem; - rcu_read_unlock(); } else { - mem = *memcg; css_get(&mem->css); } + if (unlikely(!mem)) + return 0; + + VM_BUG_ON(mem_cgroup_is_obsolete(mem)); while (1) { int ret; @@ -1072,12 +1091,19 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); } +/* + * While swap-in, try_charge -> commit or cancel, the page is locked. + * And when try_charge() successfully returns, one refcnt to memcg without + * struct page_cgroup is aquired. 
This refcnt will be cumsumed by + * "commit()" or removed by "cancel()" + */ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, gfp_t mask, struct mem_cgroup **ptr) { struct mem_cgroup *mem; swp_entry_t ent; + int ret; if (mem_cgroup_disabled()) return 0; @@ -1096,10 +1122,15 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, ent.val = page_private(page); mem = lookup_swap_cgroup(ent); - if (!mem || mem->obsolete) + if (!mem) + goto charge_cur_mm; + if (!css_tryget(&mem->css)) goto charge_cur_mm; *ptr = mem; - return __mem_cgroup_try_charge(NULL, mask, ptr, true); + ret = __mem_cgroup_try_charge(NULL, mask, ptr, true); + /* drop extra refcnt from tryget */ + css_put(&mem->css); + return ret; charge_cur_mm: if (unlikely(!mm)) mm = &init_mm; @@ -1130,13 +1161,18 @@ int mem_cgroup_cache_charge_swapin(struct page *page, ent.val = page_private(page); if (do_swap_account) { mem = lookup_swap_cgroup(ent); - if (mem && mem->obsolete) - mem = NULL; - if (mem) - mm = NULL; + if (mem) { + if (css_tryget(&mem->css)) + mm = NULL; /* charge to recorded */ + else + mem = NULL; /* charge to current */ + } } ret = mem_cgroup_charge_common(page, mm, mask, MEM_CGROUP_CHARGE_TYPE_SHMEM, mem); + /* drop extra refcnt from tryget */ + if (mem) + css_put(&mem->css); if (!ret && do_swap_account) { /* avoid double counting */ @@ -1178,7 +1214,6 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) struct mem_cgroup *memcg; memcg = swap_cgroup_record(ent, NULL); if (memcg) { - /* If memcg is obsolete, memcg can be != ptr */ res_counter_uncharge(&memcg->memsw, PAGE_SIZE); mem_cgroup_put(memcg); } @@ -1421,14 +1456,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) if (!mm) return 0; - rcu_read_lock(); - mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (unlikely(!mem)) { - rcu_read_unlock(); + mem = try_get_mem_cgroup_from_mm(mm); + if (unlikely(!mem)) return 0; - } - css_get(&mem->css); - rcu_read_unlock(); do { progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true); @@ -2086,9 +2116,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) * the number of reference from swap_cgroup and free mem_cgroup when * it goes down to 0. * - * When mem_cgroup is destroyed, mem->obsolete will be set to 0 and - * entry which points to this memcg will be ignore at swapin. - * * Removal of cgroup itself succeeds regardless of refs from swap. */ @@ -2174,7 +2201,6 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - mem->obsolete = 1; mem_cgroup_force_empty(mem, false); } -- cgit v1.2.2 From 544122e5e0ee27d5aac4a441f7746712afbf248c Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:34 -0800 Subject: memcg: fix LRU accounting for SwapCache Now, a page can be deleted from SwapCache while do_swap_page(). memcg-fix-swap-accounting-leak-v3.patch handles that, but, LRU handling is still broken. (above behavior broke assumption of memcg-synchronized-lru patch.) This patch is a fix for LRU handling (especially for per-zone counters). At charging SwapCache, - Remove page_cgroup from LRU if it's not used. - Add page cgroup to LRU if it's not linked to. 
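Both fixups bracket the commit of the charge; the pattern, as used in the diff below:

	mem_cgroup_lru_del_before_commit_swapcache(page);
	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
	mem_cgroup_lru_add_after_commit_swapcache(page);

Each helper takes zone->lru_lock and checks PCG_USED / PageLRU, so the per-zone MEM_CGROUP_ZSTAT counters stay consistent with the list the page actually sits on.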
Reported-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Paul Menage Cc: Li Zefan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b311f19bbe01..f50cb7b1efdb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -331,8 +331,12 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) return; pc = lookup_page_cgroup(page); /* can happen while we handle swapcache. */ - if (list_empty(&pc->lru)) + if (list_empty(&pc->lru) || !pc->mem_cgroup) return; + /* + * We don't check PCG_USED bit. It's cleared when the "page" is finally + * removed from global LRU. + */ mz = page_cgroup_zoneinfo(pc); mem = pc->mem_cgroup; MEM_CGROUP_ZSTAT(mz, lru) -= 1; @@ -379,16 +383,44 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) MEM_CGROUP_ZSTAT(mz, lru) += 1; list_add(&pc->lru, &mz->lists[lru]); } + /* - * To add swapcache into LRU. Be careful to all this function. - * zone->lru_lock shouldn't be held and irq must not be disabled. + * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to + * lru because the page may.be reused after it's fully uncharged (because of + * SwapCache behavior).To handle that, unlink page_cgroup from LRU when charge + * it again. This function is only used to charge SwapCache. It's done under + * lock_page and expected that zone->lru_lock is never held. */ -static void mem_cgroup_lru_fixup(struct page *page) +static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page) +{ + unsigned long flags; + struct zone *zone = page_zone(page); + struct page_cgroup *pc = lookup_page_cgroup(page); + + spin_lock_irqsave(&zone->lru_lock, flags); + /* + * Forget old LRU when this page_cgroup is *not* used. This Used bit + * is guarded by lock_page() because the page is SwapCache. + */ + if (!PageCgroupUsed(pc)) + mem_cgroup_del_lru_list(page, page_lru(page)); + spin_unlock_irqrestore(&zone->lru_lock, flags); +} + +static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page) { - if (!isolate_lru_page(page)) - putback_lru_page(page); + unsigned long flags; + struct zone *zone = page_zone(page); + struct page_cgroup *pc = lookup_page_cgroup(page); + + spin_lock_irqsave(&zone->lru_lock, flags); + /* link when the page is linked to LRU but page_cgroup isn't */ + if (PageLRU(page) && list_empty(&pc->lru)) + mem_cgroup_add_lru_list(page, page_lru(page)); + spin_unlock_irqrestore(&zone->lru_lock, flags); } + void mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { @@ -1168,8 +1200,11 @@ int mem_cgroup_cache_charge_swapin(struct page *page, mem = NULL; /* charge to current */ } } + /* SwapCache may be still linked to LRU now. */ + mem_cgroup_lru_del_before_commit_swapcache(page); ret = mem_cgroup_charge_common(page, mm, mask, MEM_CGROUP_CHARGE_TYPE_SHMEM, mem); + mem_cgroup_lru_add_after_commit_swapcache(page); /* drop extra refcnt from tryget */ if (mem) css_put(&mem->css); @@ -1185,8 +1220,6 @@ int mem_cgroup_cache_charge_swapin(struct page *page, } if (!locked) unlock_page(page); - /* add this page(page_cgroup) to the LRU we want. 
*/ - mem_cgroup_lru_fixup(page); return ret; } @@ -1201,7 +1234,9 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) if (!ptr) return; pc = lookup_page_cgroup(page); + mem_cgroup_lru_del_before_commit_swapcache(page); __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED); + mem_cgroup_lru_add_after_commit_swapcache(page); /* * Now swap is on-memory. This means this page may be * counted both as mem and swap....double count. @@ -1220,7 +1255,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) } /* add this page(page_cgroup) to the LRU we want. */ - mem_cgroup_lru_fixup(page); + } void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) @@ -1288,6 +1323,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) mem_cgroup_charge_statistics(mem, pc, false); ClearPageCgroupUsed(pc); + /* + * pc->mem_cgroup is not cleared here. It will be accessed when it's + * freed from LRU. This is safe because uncharged page is expected not + * to be reused (freed soon). Exception is SwapCache, it's handled by + * special functions. + */ mz = page_cgroup_zoneinfo(pc); unlock_page_cgroup(pc); -- cgit v1.2.2 From b5a84319a4343a0db753436fd8147e61eaafa7ea Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:35 -0800 Subject: memcg: fix shmem's swap accounting Now, you can see following even when swap accounting is enabled. 1. Create Group 01, and 02. 2. allocate a "file" on tmpfs by a task under 01. 3. swap out the "file" (by memory pressure) 4. Read "file" from a task in group 02. 5. the charge of "file" is moved to group 02. This is not ideal behavior. This is because SwapCache which was loaded by read-ahead is not taken into account.. This is a patch to fix shmem's swapcache behavior. - remove mem_cgroup_cache_charge_swapin(). - Add SwapCache handler routine to mem_cgroup_cache_charge(). By this, shmem's file cache is charged at add_to_page_cache() with GFP_NOWAIT. - pass the page of swapcache to shrink_mem_cgroup. Signed-off-by: KAMEZAWA Hiroyuki Cc: Daisuke Nishimura Cc: Balbir Singh Cc: Paul Menage Cc: Li Zefan Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 134 +++++++++++++++++++++++++------------------------------- mm/shmem.c | 30 +++++-------- 2 files changed, 72 insertions(+), 92 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f50cb7b1efdb..93a792871804 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -893,6 +893,23 @@ nomem: return -ENOMEM; } +static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page) +{ + struct mem_cgroup *mem; + swp_entry_t ent; + + if (!PageSwapCache(page)) + return NULL; + + ent.val = page_private(page); + mem = lookup_swap_cgroup(ent); + if (!mem) + return NULL; + if (!css_tryget(&mem->css)) + return NULL; + return mem; +} + /* * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be * USED state. If already USED, uncharge and return. @@ -1084,6 +1101,9 @@ int mem_cgroup_newpage_charge(struct page *page, int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { + struct mem_cgroup *mem = NULL; + int ret; + if (mem_cgroup_disabled()) return 0; if (PageCompound(page)) @@ -1096,6 +1116,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, * For GFP_NOWAIT case, the page may be pre-charged before calling * add_to_page_cache(). 
(See shmem.c) check it here and avoid to call * charge twice. (It works but has to pay a bit larger cost.) + * And when the page is SwapCache, it should take swap information + * into account. This is under lock_page() now. */ if (!(gfp_mask & __GFP_WAIT)) { struct page_cgroup *pc; @@ -1112,15 +1134,40 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, unlock_page_cgroup(pc); } - if (unlikely(!mm)) + if (do_swap_account && PageSwapCache(page)) { + mem = try_get_mem_cgroup_from_swapcache(page); + if (mem) + mm = NULL; + else + mem = NULL; + /* SwapCache may be still linked to LRU now. */ + mem_cgroup_lru_del_before_commit_swapcache(page); + } + + if (unlikely(!mm && !mem)) mm = &init_mm; if (page_is_file_cache(page)) return mem_cgroup_charge_common(page, mm, gfp_mask, MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); - else - return mem_cgroup_charge_common(page, mm, gfp_mask, - MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); + + ret = mem_cgroup_charge_common(page, mm, gfp_mask, + MEM_CGROUP_CHARGE_TYPE_SHMEM, mem); + if (mem) + css_put(&mem->css); + if (PageSwapCache(page)) + mem_cgroup_lru_add_after_commit_swapcache(page); + + if (do_swap_account && !ret && PageSwapCache(page)) { + swp_entry_t ent = {.val = page_private(page)}; + /* avoid double counting */ + mem = swap_cgroup_record(ent, NULL); + if (mem) { + res_counter_uncharge(&mem->memsw, PAGE_SIZE); + mem_cgroup_put(mem); + } + } + return ret; } /* @@ -1134,7 +1181,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, gfp_t mask, struct mem_cgroup **ptr) { struct mem_cgroup *mem; - swp_entry_t ent; int ret; if (mem_cgroup_disabled()) @@ -1142,7 +1188,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, if (!do_swap_account) goto charge_cur_mm; - /* * A racing thread's fault, or swapoff, may have already updated * the pte, and even removed page from swap cache: return success @@ -1150,14 +1195,9 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, */ if (!PageSwapCache(page)) return 0; - - ent.val = page_private(page); - - mem = lookup_swap_cgroup(ent); + mem = try_get_mem_cgroup_from_swapcache(page); if (!mem) goto charge_cur_mm; - if (!css_tryget(&mem->css)) - goto charge_cur_mm; *ptr = mem; ret = __mem_cgroup_try_charge(NULL, mask, ptr, true); /* drop extra refcnt from tryget */ @@ -1169,62 +1209,6 @@ charge_cur_mm: return __mem_cgroup_try_charge(mm, mask, ptr, true); } -#ifdef CONFIG_SWAP - -int mem_cgroup_cache_charge_swapin(struct page *page, - struct mm_struct *mm, gfp_t mask, bool locked) -{ - int ret = 0; - - if (mem_cgroup_disabled()) - return 0; - if (unlikely(!mm)) - mm = &init_mm; - if (!locked) - lock_page(page); - /* - * If not locked, the page can be dropped from SwapCache until - * we reach here. - */ - if (PageSwapCache(page)) { - struct mem_cgroup *mem = NULL; - swp_entry_t ent; - - ent.val = page_private(page); - if (do_swap_account) { - mem = lookup_swap_cgroup(ent); - if (mem) { - if (css_tryget(&mem->css)) - mm = NULL; /* charge to recorded */ - else - mem = NULL; /* charge to current */ - } - } - /* SwapCache may be still linked to LRU now. 
*/ - mem_cgroup_lru_del_before_commit_swapcache(page); - ret = mem_cgroup_charge_common(page, mm, mask, - MEM_CGROUP_CHARGE_TYPE_SHMEM, mem); - mem_cgroup_lru_add_after_commit_swapcache(page); - /* drop extra refcnt from tryget */ - if (mem) - css_put(&mem->css); - - if (!ret && do_swap_account) { - /* avoid double counting */ - mem = swap_cgroup_record(ent, NULL); - if (mem) { - res_counter_uncharge(&mem->memsw, PAGE_SIZE); - mem_cgroup_put(mem); - } - } - } - if (!locked) - unlock_page(page); - - return ret; -} -#endif - void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) { struct page_cgroup *pc; @@ -1486,18 +1470,20 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, * This is typically used for page reclaiming for shmem for reducing side * effect of page allocation from shmem, which is used by some mem_cgroup. */ -int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +int mem_cgroup_shrink_usage(struct page *page, + struct mm_struct *mm, + gfp_t gfp_mask) { - struct mem_cgroup *mem; + struct mem_cgroup *mem = NULL; int progress = 0; int retry = MEM_CGROUP_RECLAIM_RETRIES; if (mem_cgroup_disabled()) return 0; - if (!mm) - return 0; - - mem = try_get_mem_cgroup_from_mm(mm); + if (page) + mem = try_get_mem_cgroup_from_swapcache(page); + if (!mem && mm) + mem = try_get_mem_cgroup_from_mm(mm); if (unlikely(!mem)) return 0; diff --git a/mm/shmem.c b/mm/shmem.c index bbb7b043c986..5d0de96c9789 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -929,11 +929,11 @@ found: if (!inode) goto out; /* - * Charge page using GFP_HIGHUSER_MOVABLE while we can wait. - * charged back to the user(not to caller) when swap account is used. + * Charge page using GFP_KERNEL while we can wait. + * Charged back to the user(not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. */ - error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL, - true); + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); if (error) goto out; error = radix_tree_preload(GFP_KERNEL); @@ -1270,16 +1270,6 @@ repeat: goto repeat; } wait_on_page_locked(swappage); - /* - * We want to avoid charge at add_to_page_cache(). - * charge against this swap cache here. - */ - if (mem_cgroup_cache_charge_swapin(swappage, - current->mm, gfp & GFP_RECLAIM_MASK, false)) { - page_cache_release(swappage); - error = -ENOMEM; - goto failed; - } page_cache_release(swappage); goto repeat; } @@ -1334,15 +1324,19 @@ repeat: } else { shmem_swp_unmap(entry); spin_unlock(&info->lock); - unlock_page(swappage); - page_cache_release(swappage); if (error == -ENOMEM) { /* allow reclaim from this memory cgroup */ - error = mem_cgroup_shrink_usage(current->mm, + error = mem_cgroup_shrink_usage(swappage, + current->mm, gfp); - if (error) + if (error) { + unlock_page(swappage); + page_cache_release(swappage); goto failed; + } } + unlock_page(swappage); + page_cache_release(swappage); goto repeat; } } else if (sgp == SGP_READ && !filepage) { -- cgit v1.2.2 From 2cb378c862777d050c20db903b119a029845fdcb Mon Sep 17 00:00:00 2001 From: Paul Menage Date: Wed, 7 Jan 2009 18:08:37 -0800 Subject: cgroups: use hierarchy_mutex in memory controller Update the memory controller to use its hierarchy_mutex rather than calling cgroup_lock() to protect against cgroup_mkdir()/cgroup_rmdir() occurring in its hierarchy.
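For illustration, the change reduces to swapping the global cgroup lock for the subsystem-local mutex around the child-list walk; a minimal sketch of the pattern, condensed from the diff below rather than quoted from it:

        mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
        if (!list_empty(&root_mem->css.cgroup->children))
                cgroup = list_first_entry(&root_mem->css.cgroup->children,
                                          struct cgroup, sibling);
        mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);

Anything that only needs to exclude mkdir/rmdir in this hierarchy no longer serializes against every other cgroup operation behind cgroup_lock().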
Signed-off-by: Paul Menage Tested-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 93a792871804..e2996b80601f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -154,7 +154,7 @@ struct mem_cgroup { /* * While reclaiming in a hiearchy, we cache the last child we - * reclaimed from. Protected by cgroup_lock() + * reclaimed from. Protected by hierarchy_mutex */ struct mem_cgroup *last_scanned_child; /* @@ -615,7 +615,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, /* * This routine finds the DFS walk successor. This routine should be - * called with cgroup_mutex held + * called with hierarchy_mutex held */ static struct mem_cgroup * mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem) @@ -685,7 +685,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem) /* * Scan all children under the mem_cgroup mem */ - cgroup_lock(); + mutex_lock(&mem_cgroup_subsys.hierarchy_mutex); if (list_empty(&root_mem->css.cgroup->children)) { ret = root_mem; goto done; @@ -706,7 +706,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem) done: root_mem->last_scanned_child = ret; - cgroup_unlock(); + mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex); return ret; } @@ -770,18 +770,16 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, while (next_mem != root_mem) { if (mem_cgroup_is_obsolete(next_mem)) { mem_cgroup_put(next_mem); - cgroup_lock(); next_mem = mem_cgroup_get_first_node(root_mem); - cgroup_unlock(); continue; } ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, get_swappiness(next_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; - cgroup_lock(); + mutex_lock(&mem_cgroup_subsys.hierarchy_mutex); next_mem = mem_cgroup_get_next_node(next_mem, root_mem); - cgroup_unlock(); + mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex); } return ret; } -- cgit v1.2.2 From 95156f0051cba60ec674bbaa5cf7dc74a74c5612 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Jan 2009 13:02:11 +0100 Subject: lockdep, mm: fix might_fault() annotation Some code (nfs/sunrpc) uses socket ops on kernel memory while holding the mmap_sem, this is safe because kernel memory doesn't get paged out, therefore we'll never actually fault, and the might_fault() annotations will generate false positives. Reported-by: "J. Bruce Fields" Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- mm/memory.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index e009ce870859..c2d4c477e5bb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3165,6 +3165,15 @@ void print_vma_addr(char *prefix, unsigned long ip) #ifdef CONFIG_PROVE_LOCKING void might_fault(void) { + /* + * Some code (nfs/sunrpc) uses socket ops on kernel memory while + * holding the mmap_sem, this is safe because kernel memory doesn't + * get paged out, therefore we'll never actually fault, and the + * below annotations will generate false positives. 
+ */ + if (segment_eq(get_fs(), KERNEL_DS)) + return; + might_sleep(); /* * it would be nicer only to annotate paths which are not under -- cgit v1.2.2 From a36706131182f5507d1e2cfbf391b0fa8d72203c Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:09 -0800 Subject: x86 PAT: remove PFNMAP type on track_pfn_vma_new() error Impact: fix (harmless) double-free of memtype entries and avoid warning On track_pfn_vma_new() failure, reset the vm_flags so that there will be no second cleanup happening when upper level routines call unmap_vmas(). This patch fixes part of the bug reported here: http://marc.info/?l=linux-kernel&m=123108883716357&w=2 Specifically, the error message: X:5010 freeing invalid memtype d0000000-d0101000 is due to multiple frees on the error path, and will not happen with the patch below. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- mm/memory.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index c2d4c477e5bb..d3ee2ea5615c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1672,8 +1672,14 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); - if (err) + if (err) { + /* + * To indicate that track_pfn related cleanup is not + * needed from higher level routine calling unmap_vmas + */ + vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP); return -EINVAL; + } BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; -- cgit v1.2.2 From e4b866ed197cef9989348e0479fed8d864ea465b Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:11 -0800 Subject: x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param Impact: cleanup Change the protection parameter for track_pfn_vma_new() into a pgprot_t pointer. A subsequent patch changes the x86 PAT handling to return a compatible memtype in pgprot_t, if what was requested cannot be allowed due to conflicts. No functionality change in this patch. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- mm/memory.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index d3ee2ea5615c..22bfa7a47a0b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int ret; + pgprot_t pgprot = vma->vm_page_prot; /* * Technically, architectures with pte_special can avoid all these * restrictions (same for remap_pfn_range).
However we would like @@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; - if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE)) + if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE)) return -EINVAL; - ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot); + ret = insert_pfn(vma, addr, pfn, pgprot); if (ret) untrack_pfn_vma(vma, pfn, PAGE_SIZE); @@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; - err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); + err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size)); if (err) { /* * To indicate that track_pfn related cleanup is not -- cgit v1.2.2 From 2ed7c03ec17779afb4fcfa3b8c61df61bd4879ba Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:13:54 +0100 Subject: [CVE-2009-0029] Convert all system calls to return a long Convert all system calls to return a long. This should be a NOP since all converted types should have the same size anyway. With the exception of sys_exit_group which returned void. But that doesn't matter since the system call doesn't return. Signed-off-by: Heiko Carstens --- mm/filemap.c | 2 +- mm/mmap.c | 2 +- mm/mremap.c | 2 +- mm/nommu.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index ceba0bd03662..538b75ed6236 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1374,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp, return 0; } -asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) +asmlinkage long sys_readahead(int fd, loff_t offset, size_t count) { ssize_t ret; struct file *file; diff --git a/mm/mmap.c b/mm/mmap.c index 749623196cb9..a970d890cb21 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -245,7 +245,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) return next; } -asmlinkage unsigned long sys_brk(unsigned long brk) +asmlinkage long sys_brk(unsigned long brk) { unsigned long rlim, retval; unsigned long newbrk, oldbrk; diff --git a/mm/mremap.c b/mm/mremap.c index 646de959aa58..5572e0825d80 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -420,7 +420,7 @@ out_nc: return ret; } -asmlinkage unsigned long sys_mremap(unsigned long addr, +asmlinkage long sys_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { diff --git a/mm/nommu.c b/mm/nommu.c index 60ed8375c986..ee3e78927739 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -416,7 +416,7 @@ EXPORT_SYMBOL(vm_insert_page); * to a regular file. in this case, the unmapping will need * to invoke file system routines that need the global lock. */ -asmlinkage unsigned long sys_brk(unsigned long brk) +asmlinkage long sys_brk(unsigned long brk) { struct mm_struct *mm = current->mm; -- cgit v1.2.2 From 6673e0c3fbeaed2cd08e2fd4a4aa97382d6fedb0 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:14:02 +0100 Subject: [CVE-2009-0029] System call wrapper special cases System calls with an unsigned long long argument can't be converted with the standard wrappers since that would include a cast to long, which in turn means that we would lose the upper 32 bit on 32 bit architectures. Also semctl can't use the standard wrapper since it has a 'union' parameter. So we handle them as special case and add some extra wrappers instead. 
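A minimal illustration of the truncation the extra wrappers avoid (plain C; the value is made up and assumes a 32-bit long):

        loff_t offset = 0x100000000LL;  /* bit 32 set: 4 GiB */
        long arg = (long)offset;        /* the generic wrapper's cast: value becomes 0 */

Keeping loff_t in the SyS_* signatures, as the fadvise64_64 and readahead wrappers below do, preserves the full 64-bit value while still laundering the int-sized arguments through long.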
Signed-off-by: Heiko Carstens --- mm/fadvise.c | 18 ++++++++++++++++-- mm/filemap.c | 9 ++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/fadvise.c b/mm/fadvise.c index a1da969bd980..54a0f8040afa 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -24,7 +24,7 @@ * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could * deactivate the pages and clear PG_Referenced. */ -asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) +SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) { struct file *file = fget(fd); struct address_space *mapping; @@ -126,12 +126,26 @@ out: fput(file); return ret; } +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS +asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice) +{ + return SYSC_fadvise64_64((int) fd, offset, len, (int) advice); +} +SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64); +#endif #ifdef __ARCH_WANT_SYS_FADVISE64 -asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice) +SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice) { return sys_fadvise64_64(fd, offset, len, advice); } +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS +asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice) +{ + return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice); +} +SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64); +#endif #endif diff --git a/mm/filemap.c b/mm/filemap.c index 538b75ed6236..23acefe51808 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1374,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp, return 0; } -asmlinkage long sys_readahead(int fd, loff_t offset, size_t count) +SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count) { ssize_t ret; struct file *file; @@ -1393,6 +1393,13 @@ asmlinkage long sys_readahead(int fd, loff_t offset, size_t count) } return ret; } +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS +asmlinkage long SyS_readahead(long fd, loff_t offset, long count) +{ + return SYSC_readahead((int) fd, offset, (size_t) count); +} +SYSCALL_ALIAS(sys_readahead, SyS_readahead); +#endif #ifdef CONFIG_MMU /** -- cgit v1.2.2 From 6a6160a7b5c27b3c38651baef92a14fa7072b3c1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:14:15 +0100 Subject: [CVE-2009-0029] System call wrappers part 13 Signed-off-by: Heiko Carstens --- mm/fremap.c | 4 ++-- mm/mlock.c | 4 ++-- mm/mmap.c | 4 ++-- mm/mprotect.c | 4 ++-- mm/mremap.c | 6 +++--- mm/msync.c | 2 +- mm/nommu.c | 11 +++++------ 7 files changed, 17 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/fremap.c b/mm/fremap.c index 62d5bbda921a..736ba7f3306a 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -120,8 +120,8 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma, * and the vma's default protection is used. Arbitrary protections * might be implemented in the future. 
*/ -asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, - unsigned long prot, unsigned long pgoff, unsigned long flags) +SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct address_space *mapping; diff --git a/mm/mlock.c b/mm/mlock.c index e125156c664e..04d5e7429c55 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -530,7 +530,7 @@ static int do_mlock(unsigned long start, size_t len, int on) return error; } -asmlinkage long sys_mlock(unsigned long start, size_t len) +SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { unsigned long locked; unsigned long lock_limit; @@ -558,7 +558,7 @@ asmlinkage long sys_mlock(unsigned long start, size_t len) return error; } -asmlinkage long sys_munlock(unsigned long start, size_t len) +SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; diff --git a/mm/mmap.c b/mm/mmap.c index a970d890cb21..8d95902e9a38 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -245,7 +245,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) return next; } -asmlinkage long sys_brk(unsigned long brk) +SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long rlim, retval; unsigned long newbrk, oldbrk; @@ -1948,7 +1948,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) EXPORT_SYMBOL(do_munmap); -asmlinkage long sys_munmap(unsigned long addr, size_t len) +SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { int ret; struct mm_struct *mm = current->mm; diff --git a/mm/mprotect.c b/mm/mprotect.c index d0f6e7ce09f1..abe2694e13f4 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -217,8 +217,8 @@ fail: return error; } -asmlinkage long -sys_mprotect(unsigned long start, size_t len, unsigned long prot) +SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + unsigned long, prot) { unsigned long vm_flags, nstart, end, tmp, reqprot; struct vm_area_struct *vma, *prev; diff --git a/mm/mremap.c b/mm/mremap.c index 5572e0825d80..a39b7b91be46 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -420,9 +420,9 @@ out_nc: return ret; } -asmlinkage long sys_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr) +SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) { unsigned long ret; diff --git a/mm/msync.c b/mm/msync.c index 07dae08cf31c..4083209b7f02 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -28,7 +28,7 @@ * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to * applications. */ -asmlinkage long sys_msync(unsigned long start, size_t len, int flags) +SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) { unsigned long end; struct mm_struct *mm = current->mm; diff --git a/mm/nommu.c b/mm/nommu.c index ee3e78927739..8cee8c8ff0f2 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -416,7 +416,7 @@ EXPORT_SYMBOL(vm_insert_page); * to a regular file. in this case, the unmapping will need * to invoke file system routines that need the global lock. 
*/ -asmlinkage long sys_brk(unsigned long brk) +SYSCALL_DEFINE1(brk, unsigned long, brk) { struct mm_struct *mm = current->mm; @@ -1573,7 +1573,7 @@ erase_whole_vma: } EXPORT_SYMBOL(do_munmap); -asmlinkage long sys_munmap(unsigned long addr, size_t len) +SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { int ret; struct mm_struct *mm = current->mm; @@ -1657,10 +1657,9 @@ unsigned long do_mremap(unsigned long addr, } EXPORT_SYMBOL(do_mremap); -asmlinkage -unsigned long sys_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr) +SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) { unsigned long ret; -- cgit v1.2.2 From 3480b25743cb7404928d57efeaa3d085708b04c2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:14:16 +0100 Subject: [CVE-2009-0029] System call wrappers part 14 Signed-off-by: Heiko Carstens --- mm/madvise.c | 2 +- mm/mincore.c | 4 ++-- mm/mlock.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index f9349c18a1b5..b9ce574827c8 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -281,7 +281,7 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, * -EBADF - map exists, but area maps something that isn't a file. * -EAGAIN - a kernel resource was temporarily unavailable. */ -asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior) +SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) { unsigned long end, tmp; struct vm_area_struct * vma, *prev; diff --git a/mm/mincore.c b/mm/mincore.c index 5178800bc129..8cb508f84ea4 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -177,8 +177,8 @@ none_mapped: * mapped * -EAGAIN - A kernel resource was temporarily unavailable. 
*/ -asmlinkage long sys_mincore(unsigned long start, size_t len, - unsigned char __user * vec) +SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, + unsigned char __user *, vec) { long retval; unsigned long pages; diff --git a/mm/mlock.c b/mm/mlock.c index 04d5e7429c55..2904a347e476 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -595,7 +595,7 @@ out: return 0; } -asmlinkage long sys_mlockall(int flags) +SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret = -EINVAL; @@ -623,7 +623,7 @@ out: return ret; } -asmlinkage long sys_munlockall(void) +SYSCALL_DEFINE0(munlockall) { int ret; -- cgit v1.2.2 From c4ea37c26a691ad0b7e86aa5884aab27830e95c9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:14:28 +0100 Subject: [CVE-2009-0029] System call wrappers part 26 Signed-off-by: Heiko Carstens --- mm/swapfile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index da422c47e2ee..f48b831e5e5c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1377,7 +1377,7 @@ out: return ret; } -asmlinkage long sys_swapoff(const char __user * specialfile) +SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct * p = NULL; unsigned short *swap_map; @@ -1633,7 +1633,7 @@ late_initcall(max_swapfiles_check); * * The swapon system call */ -asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) +SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) { struct swap_info_struct * p; char *name = NULL; -- cgit v1.2.2 From 938bb9f5e840eddbf54e4f62f6c5ba9b3ae12c9d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:14:30 +0100 Subject: [CVE-2009-0029] System call wrappers part 28 Signed-off-by: Heiko Carstens --- mm/mempolicy.c | 24 +++++++++++------------- mm/migrate.c | 8 ++++---- 2 files changed, 15 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e412ffa8e52e..3eb4a6fdc043 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1068,10 +1068,9 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; } -asmlinkage long sys_mbind(unsigned long start, unsigned long len, - unsigned long mode, - unsigned long __user *nmask, unsigned long maxnode, - unsigned flags) +SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, + unsigned long, mode, unsigned long __user *, nmask, + unsigned long, maxnode, unsigned, flags) { nodemask_t nodes; int err; @@ -1091,8 +1090,8 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len, } /* Set the process memory policy */ -asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, - unsigned long maxnode) +SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, + unsigned long, maxnode) { int err; nodemask_t nodes; @@ -1110,9 +1109,9 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, return do_set_mempolicy(mode, flags, &nodes); } -asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, - const unsigned long __user *old_nodes, - const unsigned long __user *new_nodes) +SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + const unsigned long __user *, old_nodes, + const unsigned long __user *, new_nodes) { const struct cred *cred = current_cred(), *tcred; struct mm_struct *mm; @@ -1185,10 +1184,9 @@ out: /* Retrieve NUMA policy */ -asmlinkage long sys_get_mempolicy(int __user *policy, - unsigned long __user *nmask, - unsigned long maxnode, - unsigned long addr, unsigned long flags) +SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, + unsigned long __user *, nmask, unsigned long, maxnode, + unsigned long, addr, unsigned long, flags) { int err; int uninitialized_var(pval); diff --git a/mm/migrate.c b/mm/migrate.c index a30ea5fcf9f1..2bb4e1d63520 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1055,10 +1055,10 @@ out: * Move a list of pages in the address space of the currently executing * process. */ -asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, - const void __user * __user *pages, - const int __user *nodes, - int __user *status, int flags) +SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + const void __user * __user *, pages, + const int __user *, nodes, + int __user *, status, int, flags) { const struct cred *cred = current_cred(), *tcred; struct task_struct *task; -- cgit v1.2.2 From 822c18f2e38cbc775792ab65ace4f9198678dec9 Mon Sep 17 00:00:00 2001 From: Ivan Kokshaysky Date: Thu, 15 Jan 2009 13:50:48 -0800 Subject: alpha: fix vmalloc breakage On alpha, we have to map some stuff in the VMALLOC space very early in the boot process (to make SRM console callbacks work and so on, see arch/alpha/mm/init.c). For old VM allocator, we just manually placed a vm_struct onto the global vmlist and this worked for ages. Unfortunately, the new allocator isn't aware of this, so it constantly tries to allocate the VM space which is already in use, making vmalloc on alpha defunct. This patch forces KVA to import vmlist entries on init. 
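The early mappings in question are, roughly, static vm_struct entries pushed onto vmlist by hand; a hedged sketch with illustrative names, not the actual alpha code:

        static struct vm_struct srm_console_area;       /* name is illustrative */

        srm_console_area.addr = (void *)CONSOLE_KVA;    /* fixed VMALLOC-space address (assumed) */
        srm_console_area.size = size;
        srm_console_area.flags = VM_IOREMAP;
        srm_console_area.next = vmlist;
        vmlist = &srm_console_area;

The old allocator scanned vmlist directly and stayed clear of such entries; the import loop below teaches the new vmap_area tree about them so it no longer hands out the same range twice.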
[akpm@linux-foundation.org: remove unneeded check (per Johannes)] Signed-off-by: Ivan Kokshaysky Cc: Nick Piggin Cc: Johannes Weiner Cc: Richard Henderson Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index c5db9a7264d9..7e00b280648a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -984,6 +985,8 @@ EXPORT_SYMBOL(vm_map_ram); void __init vmalloc_init(void) { + struct vmap_area *va; + struct vm_struct *tmp; int i; for_each_possible_cpu(i) { @@ -996,6 +999,14 @@ void __init vmalloc_init(void) vbq->nr_dirty = 0; } + /* Import existing vmlist entries. */ + for (tmp = vmlist; tmp; tmp = tmp->next) { + va = alloc_bootmem(sizeof(struct vmap_area)); + va->flags = tmp->flags | VM_VM_AREA; + va->va_start = (unsigned long)tmp->addr; + va->va_end = va->va_start + tmp->size; + __insert_vmap_area(va); + } vmap_initialized = true; } -- cgit v1.2.2 From bd112db872c2f69993c86f458467acb4a14da010 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Thu, 15 Jan 2009 13:51:11 -0800 Subject: memcg: fix mem_cgroup_get_reclaim_stat_from_page In case of swapin, a new page is added to lru before it is charged, so page->pc->mem_cgroup points to NULL or last mem_cgroup the page was charged before. In the latter case, if the mem_cgroup has already freed by rmdir, the area pointed to by page->pc->mem_cgroup may have invalid data. Actually, I saw general protection fault. general protection fault: 0000 [#1] SMP last sysfs file: /sys/devices/system/cpu/cpu15/cache/index1/shared_cpu_map CPU 4 Modules linked in: ipt_REJECT xt_tcpudp iptable_filter ip_tables x_tables bridge stp ipv6 autofs4 hidp rfcomm l2cap bluetooth sunrpc dm_mirror dm_region_hash dm_log dm_multipath dm_mod rfkill input_polldev sbs sbshc battery ac lp sg ide_cd_mod cdrom button serio_raw acpi_memhotplug parport_pc e1000 rtc_cmos parport rtc_core rtc_lib i2c_i801 i2c_core shpchp pcspkr ata_piix libata megaraid_mbox megaraid_mm sd_mod scsi_mod ext3 jbd ehci_hcd ohci_hcd uhci_hcd [last unloaded: microcode] Pid: 26038, comm: page01 Tainted: G W 2.6.28-rc9-mm1-mmotm-2008-12-22-16-14-f2ab3dea #1 RIP: 0010:[] [] update_page_reclaim_stat+0x2f/0x42 RSP: 0000:ffff8801ee457da8 EFLAGS: 00010002 RAX: 32353438312021c8 RBX: 0000000000000000 RCX: 32353438312021c8 RDX: 0000000000000000 RSI: ffff8800cb0b1000 RDI: ffff8801164d1d28 RBP: ffff880110002cb8 R08: ffff88010f2eae23 R09: 0000000000000001 R10: ffff8800bc514b00 R11: ffff880110002c00 R12: 0000000000000000 R13: ffff88000f484100 R14: 0000000000000003 R15: 00000000001200d2 FS: 00007f8a261726f0(0000) GS:ffff88010f2eaa80(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00007f8a25d22000 CR3: 00000001ef18c000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process page01 (pid: 26038, threadinfo ffff8801ee456000, task ffff8800b585b960) Stack: ffffe200071ee568 ffff880110001f00 0000000000000000 ffffffff8028ea17 ffff88000f484100 0000000000000000 0000000000000020 00007f8a25d22000 ffff8800bc514b00 ffffffff8028ec34 0000000000000000 0000000000016fd8 Call Trace: [] ? ____pagevec_lru_add+0xc1/0x13c [] ? drain_cpu_pagevecs+0x36/0x89 [] ? swapin_readahead+0x78/0x98 [] ? handle_mm_fault+0x3d9/0x741 [] ? do_page_fault+0x3ce/0x78c [] ? trace_hardirqs_off_thunk+0x3a/0x3c [] ? 
page_fault+0x1f/0x30 Code: cc 55 48 8d af b8 0d 00 00 48 89 f7 53 89 d3 e8 39 85 02 00 48 63 d3 48 ff 44 d5 10 45 85 e4 74 05 48 ff 44 d5 00 48 85 c0 74 0e <48> ff 44 d0 10 45 85 e4 74 04 48 ff 04 d0 5b 5d 41 5c c3 41 54 RIP [] update_page_reclaim_stat+0x2f/0x42 RSP Signed-off-by: Daisuke Nishimura Acked-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Pavel Emelyanov Cc: Li Zefan Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e2996b80601f..b66512771167 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -358,6 +358,10 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) return; pc = lookup_page_cgroup(page); + /* + * Used bit is set without atomic ops but after smp_wmb(). + * For making pc->mem_cgroup visible, insert smp_rmb() here. + */ smp_rmb(); /* unused page is not rotated. */ if (!PageCgroupUsed(pc)) @@ -374,7 +378,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) if (mem_cgroup_disabled()) return; pc = lookup_page_cgroup(page); - /* barrier to sync with "charge" */ + /* + * Used bit is set without atomic ops but after smp_wmb(). + * For making pc->mem_cgroup visible, insert smp_rmb() here. + */ smp_rmb(); if (!PageCgroupUsed(pc)) return; @@ -559,6 +566,14 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page) return NULL; pc = lookup_page_cgroup(page); + /* + * Used bit is set without atomic ops but after smp_wmb(). + * For making pc->mem_cgroup visible, insert smp_rmb() here. + */ + smp_rmb(); + if (!PageCgroupUsed(pc)) + return NULL; + mz = page_cgroup_zoneinfo(pc); if (!mz) return NULL; -- cgit v1.2.2 From 40d58138f832a48208cdce57d6572a033b1f7a23 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Thu, 15 Jan 2009 13:51:12 -0800 Subject: memcg: fix error path of mem_cgroup_move_parent There is a bug in error path of mem_cgroup_move_parent. Extra refcnt got from try_charge should be dropped, and usages incremented by try_charge should be decremented in both error paths: A: failure at get_page_unless_zero B: failure at isolate_lru_page This bug makes this parent directory unremovable. In case of A, rmdir doesn't return, because res.usage doesn't go down to 0 at mem_cgroup_force_empty even after all the pc in lru are removed. In case of B, rmdir fails and returns -EBUSY, because it has extra ref counts even after res.usage goes down to 0. 
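Reduced to a skeleton, the fixed error handling pairs each failure with exactly the cleanup it owes (labels as in the patch below; unrelated code elided):

        if (!get_page_unless_zero(page)) {
                ret = -EBUSY;
                goto uncharge;          /* case A: still owes refcnt + usage */
        }
        ret = isolate_lru_page(page);
        if (ret)
                goto cancel;            /* case B: also owes the page ref */
        ...
cancel:
        put_page(page);
uncharge:
        css_put(&parent->css);          /* refcnt taken by try_charge */
        res_counter_uncharge(&parent->res, PAGE_SIZE);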
Signed-off-by: Daisuke Nishimura Acked-by: KAMEZAWA Hiroyuki Acked-by: Balbir Singh Cc: Pavel Emelyanov Cc: Li Zefan Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b66512771167..7be9b35d7ffb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -994,14 +994,15 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, if (pc->mem_cgroup != from) goto out; - css_put(&from->css); res_counter_uncharge(&from->res, PAGE_SIZE); mem_cgroup_charge_statistics(from, pc, false); if (do_swap_account) res_counter_uncharge(&from->memsw, PAGE_SIZE); + css_put(&from->css); + + css_get(&to->css); pc->mem_cgroup = to; mem_cgroup_charge_statistics(to, pc, true); - css_get(&to->css); ret = 0; out: unlock_page_cgroup(pc); @@ -1034,8 +1035,10 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, if (ret || !parent) return ret; - if (!get_page_unless_zero(page)) - return -EBUSY; + if (!get_page_unless_zero(page)) { + ret = -EBUSY; + goto uncharge; + } ret = isolate_lru_page(page); @@ -1044,19 +1047,23 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, ret = mem_cgroup_move_account(pc, child, parent); - /* drop extra refcnt by try_charge() (move_account increment one) */ - css_put(&parent->css); putback_lru_page(page); if (!ret) { put_page(page); + /* drop extra refcnt by try_charge() */ + css_put(&parent->css); return 0; } - /* uncharge if move fails */ + cancel: + put_page(page); +uncharge: + /* drop extra refcnt by try_charge() */ + css_put(&parent->css); + /* uncharge if move fails */ res_counter_uncharge(&parent->res, PAGE_SIZE); if (do_swap_account) res_counter_uncharge(&parent->memsw, PAGE_SIZE); - put_page(page); return ret; } -- cgit v1.2.2 From c268e9946d7dc30ac4e55cdc3f43c8af1ae8153c Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Thu, 15 Jan 2009 13:51:13 -0800 Subject: memcg: fix hierarchical reclaim If root_mem has no children, last_scanned_child is set to root_mem itself. But after some children are added to root_mem, mem_cgroup_get_next_node can mem_cgroup_put the root_mem although mem_cgroup_get() was never called on root_mem. This patch fixes this behavior by: - Set last_scanned_child to NULL if root_mem has no children or the DFS search has returned to root_mem itself (root_mem is not a "child" of root_mem). Make mem_cgroup_get_first_node return root_mem in this case. There are no mem_cgroup_get/put for root_mem. - Rename mem_cgroup_get_next_node to __mem_cgroup_get_next_node, and mem_cgroup_get_first_node to mem_cgroup_get_next_node. Make mem_cgroup_hierarchical_reclaim call only the new mem_cgroup_get_next_node.
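The refcount discipline that makes this work is concentrated in the tail of the new mem_cgroup_get_next_node(); condensed from the diff below:

        if (next)
                mem_cgroup_get(next);           /* pin the new cursor */
        root_mem->last_scanned_child = next;
        if (orig)
                mem_cgroup_put(orig);           /* unpin the previous one */
        mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
        return (next) ? next : root_mem;        /* root itself is never pinned */

Only real children are ever stored in last_scanned_child, so the get/put calls balance no matter where the DFS walk stops.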
Signed-off-by: Daisuke Nishimura Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pavel Emelyanov Cc: Li Zefan Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 68 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 36 insertions(+), 32 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7be9b35d7ffb..322625f551c2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -633,7 +633,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, * called with hierarchy_mutex held */ static struct mem_cgroup * -mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem) +__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem) { struct cgroup *cgroup, *curr_cgroup, *root_cgroup; @@ -644,19 +644,16 @@ mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem) /* * Walk down to children */ - mem_cgroup_put(curr); cgroup = list_entry(curr_cgroup->children.next, struct cgroup, sibling); curr = mem_cgroup_from_cont(cgroup); - mem_cgroup_get(curr); goto done; } visit_parent: if (curr_cgroup == root_cgroup) { - mem_cgroup_put(curr); - curr = root_mem; - mem_cgroup_get(curr); + /* caller handles NULL case */ + curr = NULL; goto done; } @@ -664,11 +661,9 @@ visit_parent: * Goto next sibling */ if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) { - mem_cgroup_put(curr); cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup, sibling); curr = mem_cgroup_from_cont(cgroup); - mem_cgroup_get(curr); goto done; } @@ -679,7 +674,6 @@ visit_parent: goto visit_parent; done: - root_mem->last_scanned_child = curr; return curr; } @@ -689,40 +683,46 @@ done: * that to reclaim free pages from. */ static struct mem_cgroup * -mem_cgroup_get_first_node(struct mem_cgroup *root_mem) +mem_cgroup_get_next_node(struct mem_cgroup *root_mem) { struct cgroup *cgroup; - struct mem_cgroup *ret; + struct mem_cgroup *orig, *next; bool obsolete; - obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child); - /* * Scan all children under the mem_cgroup mem */ mutex_lock(&mem_cgroup_subsys.hierarchy_mutex); + + orig = root_mem->last_scanned_child; + obsolete = mem_cgroup_is_obsolete(orig); + if (list_empty(&root_mem->css.cgroup->children)) { - ret = root_mem; + /* + * root_mem might have children before and last_scanned_child + * may point to one of them. We put it later. + */ + if (orig) + VM_BUG_ON(!obsolete); + next = NULL; goto done; } - if (!root_mem->last_scanned_child || obsolete) { - - if (obsolete && root_mem->last_scanned_child) - mem_cgroup_put(root_mem->last_scanned_child); - + if (!orig || obsolete) { cgroup = list_first_entry(&root_mem->css.cgroup->children, struct cgroup, sibling); - ret = mem_cgroup_from_cont(cgroup); - mem_cgroup_get(ret); + next = mem_cgroup_from_cont(cgroup); } else - ret = mem_cgroup_get_next_node(root_mem->last_scanned_child, - root_mem); + next = __mem_cgroup_get_next_node(orig, root_mem); done: - root_mem->last_scanned_child = ret; + if (next) + mem_cgroup_get(next); + root_mem->last_scanned_child = next; + if (orig) + mem_cgroup_put(orig); mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex); - return ret; + return (next) ? 
next : root_mem; } static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) @@ -780,21 +780,18 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, if (!root_mem->use_hierarchy) return ret; - next_mem = mem_cgroup_get_first_node(root_mem); + next_mem = mem_cgroup_get_next_node(root_mem); while (next_mem != root_mem) { if (mem_cgroup_is_obsolete(next_mem)) { - mem_cgroup_put(next_mem); - next_mem = mem_cgroup_get_first_node(root_mem); + next_mem = mem_cgroup_get_next_node(root_mem); continue; } ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, get_swappiness(next_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; - mutex_lock(&mem_cgroup_subsys.hierarchy_mutex); - next_mem = mem_cgroup_get_next_node(next_mem, root_mem); - mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex); + next_mem = mem_cgroup_get_next_node(root_mem); } return ret; } @@ -2254,7 +2251,14 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - mem_cgroup_put(mem_cgroup_from_cont(cont)); + struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + struct mem_cgroup *last_scanned_child = mem->last_scanned_child; + + if (last_scanned_child) { + VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child)); + mem_cgroup_put(last_scanned_child); + } + mem_cgroup_put(mem); } static int mem_cgroup_populate(struct cgroup_subsys *ss, -- cgit v1.2.2 From 4d1c627389c8ba6d9e703208567ffcdbd356f682 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Thu, 15 Jan 2009 13:51:14 -0800 Subject: memcg: make oom less frequently In the previous implementation, mem_cgroup_try_charge checked the return value of mem_cgroup_try_to_free_pages, and just retried if some pages had been reclaimed. But now, try_charge (and mem_cgroup_hierarchical_reclaim called from it) only checks whether the usage is less than the limit. This patch restores the previous behavior, so that oom happens less frequently. Signed-off-by: Daisuke Nishimura Acked-by: Balbir Singh Acked-by: KAMEZAWA Hiroyuki Cc: Pavel Emelyanov Cc: Li Zefan Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 322625f551c2..fb62b4335fa9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -773,10 +773,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, * but there might be left over accounting, even after children * have left.
*/ - ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap, + ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap, get_swappiness(root_mem)); if (mem_cgroup_check_under_limit(root_mem)) - return 0; + return 1; /* indicate reclaim has succeeded */ if (!root_mem->use_hierarchy) return ret; @@ -787,10 +787,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, next_mem = mem_cgroup_get_next_node(root_mem); continue; } - ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, + ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, get_swappiness(next_mem)); if (mem_cgroup_check_under_limit(root_mem)) - return 0; + return 1; /* indicate reclaim has succeeded */ next_mem = mem_cgroup_get_next_node(root_mem); } return ret; @@ -875,6 +875,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask, noswap); + if (ret) + continue; /* * try_to_free_mem_cgroup_pages() might not give us a full -- cgit v1.2.2 From 46666d8ac42893f90edde7e57a11bc8749d7e89c Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 15 Jan 2009 13:51:15 -0800 Subject: revert "mm: vmalloc use mutex for purge" Revert commit e97a630eb0f5b8b380fd67504de6cedebb489003 ("mm: vmalloc use mutex for purge") Bryan Donlan reports: : After testing 2.6.29-rc1 on xen-x86 with a btrfs root filesystem, I : got the OOPS quoted below and a hard freeze shortly after boot. : Boot messages and config are attached. : : ------------[ cut here ]------------ : Kernel BUG at c05ef80d [verbose debug info unavailable] : invalid opcode: 0000 [#1] SMP : last sysfs file: /sys/block/xvdc/size : Modules linked in: : : Pid: 0, comm: swapper Not tainted (2.6.29-rc1 #6) : EIP: 0061:[] EFLAGS: 00010087 CPU: 2 : EIP is at schedule+0x7cd/0x950 : EAX: d5aeca80 EBX: 00000002 ECX: 00000000 EDX: d4cb9a40 : ESI: c12f5600 EDI: d4cb9a40 EBP: d6033fa4 ESP: d6033ef4 : DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069 : Process swapper (pid: 0, ti=d6032000 task=d6020b70 task.ti=d6032000) : Stack: : 000d85bc 00000000 000186a0 00000000 0dd11410 c0105417 c12efe00 0dc367c3 : 00000011 c0105d46 d5a5d310 deadbeef d4cb9a40 c07cc600 c05f1340 c12e0060 : deadbeef d6020b70 d6020d08 00000002 c014377d 00000000 c12f5600 00002c22 : Call Trace: : [] xen_force_evtchn_callback+0x17/0x30 : [] check_events+0x8/0x12 : [] _spin_unlock_irqrestore+0x20/0x40 : [] hrtimer_start_range_ns+0x12d/0x2e0 : [] tick_nohz_restart_sched_tick+0x146/0x160 : [] cpu_idle+0xa5/0xc0 and bisected it to this commit. Let's remove it now while we have a think about the problem. Reported-by: Bryan Donlan Tested-by: Christophe Saout Cc: Nick Piggin Cc: Ingo Molnar Cc: Jeremy Fitzhardinge Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7e00b280648a..75f49d312e8c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -496,7 +495,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush) { - static DEFINE_MUTEX(purge_lock); + static DEFINE_SPINLOCK(purge_lock); LIST_HEAD(valist); struct vmap_area *va; int nr = 0; @@ -507,10 +506,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, * the case that isn't actually used at the moment anyway. 
*/ if (!sync && !force_flush) { - if (!mutex_trylock(&purge_lock)) + if (!spin_trylock(&purge_lock)) return; } else - mutex_lock(&purge_lock); + spin_lock(&purge_lock); rcu_read_lock(); list_for_each_entry_rcu(va, &vmap_area_list, list) { @@ -542,7 +541,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, __free_vmap_area(va); spin_unlock(&vmap_area_lock); } - mutex_unlock(&purge_lock); + spin_unlock(&purge_lock); } /* -- cgit v1.2.2 From 0eb253e223c88b982461e59154fcad1b82597592 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Jan 2009 13:51:25 -0800 Subject: memcg: fix section mismatch At system boot when creating the top cgroup, mem_cgroup_create() calls enable_swap_cgroup() which is marked as __init, so mark mem_cgroup_create() as __ref to avoid false section mismatch warning. Reported-by: Rakib Mullick Signed-off-by: Li Zefan Acked-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fb62b4335fa9..f0dc076adf05 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2202,7 +2202,7 @@ static void __init enable_swap_cgroup(void) } #endif -static struct cgroup_subsys_state * +static struct cgroup_subsys_state * __ref mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) { struct mem_cgroup *mem, *parent; -- cgit v1.2.2 From 068b38c1fa7a9210608f27ac521897ccc5f9b726 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Jan 2009 13:51:26 -0800 Subject: memcg: fix a race when setting memory.swappiness (suppose: memcg->use_hierarchy == 0 and memcg->swappiness == 60)

 echo 10 > /memcg/0/swappiness   |
   mem_cgroup_swappiness_write() |
     ...                         | echo 1 > /memcg/0/use_hierarchy
                                 | mkdir /mnt/0/1
                                 |   sub_memcg->swappiness = 60;
     memcg->swappiness = 10;     |

In the above scenario, we end up having 2 different swappiness values in a single hierarchy. We should hold cgroup_lock() when checking cgrp->children list. Signed-off-by: Li Zefan Acked-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Paul Menage Cc: Daisuke Nishimura Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f0dc076adf05..4d0ea3ceba6d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1992,6 +1992,7 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, { struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct mem_cgroup *parent; + if (val > 100) return -EINVAL; @@ -1999,15 +2000,22 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, return -EINVAL; parent = mem_cgroup_from_cont(cgrp->parent); + + cgroup_lock(); + /* If under hierarchy, only empty-root can set this value */ if ((parent->use_hierarchy) || - (memcg->use_hierarchy && !list_empty(&cgrp->children))) + (memcg->use_hierarchy && !list_empty(&cgrp->children))) { + cgroup_unlock(); return -EINVAL; + } spin_lock(&memcg->reclaim_param_lock); memcg->swappiness = val; spin_unlock(&memcg->reclaim_param_lock); + cgroup_unlock(); + return 0; } -- cgit v1.2.2 From eb6434d9e79a72d35d68811efd68fe8bab8f5baf Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 21 Jan 2009 17:45:47 +0900 Subject: nommu: Stub in vm_map_ram()/vm_unmap_ram()/vm_unmap_aliases().
Presently we do not support these interfaces, so make them BUG() wrappers as per the rest of the vmap interface on nommu. Fixes up the modular xfs build. Signed-off-by: Paul Mundt --- mm/nommu.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 8cee8c8ff0f2..0c3e7d2114f6 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -10,7 +10,7 @@ * Copyright (c) 2000-2003 David McCullough * Copyright (c) 2000-2001 D Jeff Dionne * Copyright (c) 2002 Greg Ungerer - * Copyright (c) 2007-2008 Paul Mundt + * Copyright (c) 2007-2009 Paul Mundt */ #include @@ -394,6 +394,24 @@ void vunmap(const void *addr) } EXPORT_SYMBOL(vunmap); +void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) +{ + BUG(); + return NULL; +} +EXPORT_SYMBOL(vm_map_ram); + +void vm_unmap_ram(const void *mem, unsigned int count) +{ + BUG(); +} +EXPORT_SYMBOL(vm_unmap_ram); + +void vm_unmap_aliases(void) +{ +} +EXPORT_SYMBOL_GPL(vm_unmap_aliases); + /* * Implement a stub for vmalloc_sync_all() if the architecture chose not to * have one. -- cgit v1.2.2 From 05ae6fa31874eda2484da13c5dc4ddee8a47a0a4 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Tue, 13 Jan 2009 17:30:22 +1000 Subject: uclinux: add process name to allocation error message This patch adds the name of the process to the bad allocation error message on non-MMU systems. Change suggested by jsujjavanich@syntech-fuelmaster.com Signed-off-by: Greg Ungerer --- mm/nommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 0c3e7d2114f6..2fcf47d449b4 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1161,8 +1161,8 @@ error_free: return ret; enomem: - printk("Allocation of length %lu from process %d failed\n", - len, current->pid); + printk("Allocation of length %lu from process %d (%s) failed\n", + len, current->pid, current->comm); show_free_areas(); return -ENOMEM; } -- cgit v1.2.2 From 3718909448116bf4411445468c58acc946379f92 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 27 Jan 2009 18:59:46 -0800 Subject: slub: fix per cpu kmem_cache_cpu array memory leak The per cpu array of kmem_cache_cpu structures accommodates NR_KMEM_CACHE_CPU such structs. When this array overflows and a struct is allocated by kmalloc(), it may have an address at the upper bound of this array. If this happens, it does not get freed and the per cpu kmem_cache_cpu_free pointer will be out of bounds after kmem_cache_destroy() or cpu offlining.
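The leak reduces to an off-by-one in a pointer range test; a sketch:

        struct kmem_cache_cpu *base = per_cpu(kmem_cache_cpu, cpu);

        /* the static per-cpu array covers [base, base + NR_KMEM_CACHE_CPU) */
        if (c < base || c >= base + NR_KMEM_CACHE_CPU) {
                kfree(c);       /* a kmalloc()ed overflow structure */
                return;
        }
        /* otherwise the slot goes back on the static free list */

With '>' instead of '>=', a kmalloc()ed struct that happened to sit exactly at base + NR_KMEM_CACHE_CPU was misclassified as static and never freed.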
Cc: Christoph Lameter Signed-off-by: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 6392ae5cc6b1..bdc9abb08a23 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1996,7 +1996,7 @@ static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) { if (c < per_cpu(kmem_cache_cpu, cpu) || - c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { + c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { kfree(c); return; } -- cgit v1.2.2 From de33c8db5910cda599899dd431cc30d7c1018cbf Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 29 Jan 2009 17:46:42 -0800 Subject: Fix OOPS in mmap_region() when merging adjacent VM_LOCKED file segments As of commit ba470de43188cdbff795b5da43a1474523c6c2fb ("mmap: handle mlocked pages during map, remap, unmap") we now use the 'vma' variable at the end of mmap_region() to handle the page-in of newly mapped mlocked pages. However, if we merged adjacent vma's together, the vma we're using may be stale. We historically consciously avoided using it after the merge operation, but that got overlooked when redoing the locked page handling. This commit simplifies mmap_region() by doing any vma merges early, avoiding the issue entirely, and 'vma' will always be valid. As pointed out by Hugh Dickins, this depends on any drivers that change the page offset or flags to have set one of the VM_SPECIAL bits (so that they cannot trigger the early merge logic), but that's true in general. Reported-and-tested-by: Maksim Yevmenkin Cc: Lee Schermerhorn Cc: Nick Piggin Cc: Andrew Morton Cc: Hugh Dickins Signed-off-by: Linus Torvalds --- mm/mmap.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 8d95902e9a38..d3fa10a726cf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1134,16 +1134,11 @@ munmap_back: } /* - * Can we just expand an old private anonymous mapping? - * The VM_SHARED test is necessary because shmem_zero_setup - * will create the file object for a shared anonymous map below. + * Can we just expand an old mapping? */ - if (!file && !(vm_flags & VM_SHARED)) { - vma = vma_merge(mm, prev, addr, addr + len, vm_flags, - NULL, NULL, pgoff, NULL); - if (vma) - goto out; - } + vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL); + if (vma) + goto out; /* * Determine the object being mapped and call the appropriate @@ -1206,17 +1201,8 @@ munmap_back: if (vma_wants_writenotify(vma)) vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); - if (file && vma_merge(mm, prev, addr, vma->vm_end, - vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); - fput(file); - if (vm_flags & VM_EXECUTABLE) - removed_exe_file_vma(mm); - } else { - vma_link(mm, vma, prev, rb_link, rb_parent); - file = vma->vm_file; - } + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; /* Once vma denies write, undo our temporary denial count */ if (correct_wcount) -- cgit v1.2.2 From 7bcc1bb1232de6efc0b85e0c7fe38e90b2436318 Mon Sep 17 00:00:00 2001 From: Daisuke Nishimura Date: Thu, 29 Jan 2009 14:25:11 -0800 Subject: memcg: get/put parents at create/free The lifetime of struct cgroup and struct mem_cgroup is different and mem_cgroup has its own reference count for handling references from swap_cgroup.
This causes a strange problem: the parent mem_cgroup can die while a child mem_cgroup is still alive, which causes a bug when use_hierarchy==1 because res_counter_uncharge climbs up the tree. This patch avoids it by getting the parent at create and putting it at free. Signed-off-by: Daisuke Nishimura Reviewed-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Pavel Emelyanov Cc: Li Zefan Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4d0ea3ceba6d..76feccd26dc3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -202,6 +202,7 @@ pcg_default_flags[NR_CHARGE_TYPE] = { static void mem_cgroup_get(struct mem_cgroup *mem); static void mem_cgroup_put(struct mem_cgroup *mem); +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, struct page_cgroup *pc, @@ -2193,10 +2194,23 @@ static void mem_cgroup_get(struct mem_cgroup *mem) static void mem_cgroup_put(struct mem_cgroup *mem) { - if (atomic_dec_and_test(&mem->refcnt)) + if (atomic_dec_and_test(&mem->refcnt)) { + struct mem_cgroup *parent = parent_mem_cgroup(mem); __mem_cgroup_free(mem); + if (parent) + mem_cgroup_put(parent); + } } +/* + * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. + */ +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) +{ + if (!mem->res.parent) + return NULL; + return mem_cgroup_from_res_counter(mem->res.parent, res); +} #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP static void __init enable_swap_cgroup(void) @@ -2235,6 +2249,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (parent && parent->use_hierarchy) { res_counter_init(&mem->res, &parent->res); res_counter_init(&mem->memsw, &parent->memsw); + /* + * We increment refcnt of the parent to ensure that we can + * safely access it on res_counter_charge/uncharge. + * This refcnt will be decremented when freeing this + * mem_cgroup(see mem_cgroup_put). + */ + mem_cgroup_get(parent); } else { res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); -- cgit v1.2.2 From 85d9fc89fb0f0703df6444f260187c088a8d59ff Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Thu, 29 Jan 2009 14:25:13 -0800 Subject: memcg: fix refcnt handling at swapoff Now, at swapoff, even when try_charge() fails, commit is executed. This is a bug which turns the refcnt of cgroup_subsys_state negative.
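The invariant the fix enforces in unuse_pte() is that a failed try_charge must never reach the commit; condensed (the commit and cancel calls live in parts of the function not quoted in the hunk below):

        if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
                ret = -ENOMEM;
                goto out_nolock;        /* skip the commit and the pte lock */
        }
        ...
        mem_cgroup_commit_charge_swapin(page, ptr);

Previously a failed try_charge fell through, so a commit was paired with a charge that never succeeded and the css refcnt went negative.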
Reported-by: Li Zefan Tested-by: Li Zefan Tested-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Reviewed-by: Daisuke Nishimura Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index f48b831e5e5c..7e6304dfafab 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -698,8 +698,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, pte_t *pte; int ret = 1; - if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) + if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) { ret = -ENOMEM; + goto out_nolock; + } pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { @@ -723,6 +725,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, activate_page(page); out: pte_unmap_unlock(pte, ptl); +out_nolock: return ret; } -- cgit v1.2.2 From 299b4eaa302138426d5a9ecd954de1f565d76c94 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Thu, 29 Jan 2009 14:25:17 -0800 Subject: memcg: NULL pointer dereference at rmdir on some NUMA systems N_POSSIBLE doesn't mean there is memory... and force_empty can visit an invalid node which has no pgdat. To visit all valid nodes, N_HIGH_MEMORY should be used. Reported-by: Li Zefan Signed-off-by: KAMEZAWA Hiroyuki Tested-by: Li Zefan Cc: Balbir Singh Cc: Daisuke Nishimura Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 76feccd26dc3..8e4be9cb2a6a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1685,7 +1685,7 @@ move_account: /* This is for making all *used* pages to be on LRU. */ lru_add_drain_all(); ret = 0; - for_each_node_state(node, N_POSSIBLE) { + for_each_node_state(node, N_HIGH_MEMORY) { for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { enum lru_list l; for_each_lru(l) { -- cgit v1.2.2 From 33bfad54b58cf05cfe6678c3ec9235d4bc8db4c2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 30 Jan 2009 11:37:22 -0800 Subject: Allow opportunistic merging of VM_CAN_NONLINEAR areas Commit de33c8db5910cda599899dd431cc30d7c1018cbf ("Fix OOPS in mmap_region() when merging adjacent VM_LOCKED file segments") unified the vma merging of anonymous and file maps to just one place, which simplified the code and fixed a use-after-free bug that could cause an oops. But by doing the merge opportunistically before even having called ->mmap() on the file method, it now compares two different 'vm_flags' values: the pre-mmap() value of the new not-yet-formed vma, and previous mappings of the same file around it. And in doing so, it refused to merge the common file case, which adds a marker to say "I can be made non-linear". This fixes it by just adding a set of flags that don't have to match, because we know they are ok to merge. Currently it's only that single VM_CAN_NONLINEAR flag, but at least conceptually there could be others in the future.
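A worked example of the relaxed test (flag values illustrative):

        unsigned long old = VM_READ | VM_CAN_NONLINEAR; /* existing vma, after its ->mmap() ran */
        unsigned long new = VM_READ;                    /* incoming request, before ->mmap() */

        /* old != new, so the exact-match test refused the merge; the XOR
         * leaves only VM_CAN_NONLINEAR set, which ~VM_MERGEABLE_FLAGS
         * masks away, so is_mergeable_vma() now says yes: */
        int mergeable = !((old ^ new) & ~VM_MERGEABLE_FLAGS);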
Reported-and-acked-by: Hugh Dickins Cc: Lee Schermerhorn Cc: Nick Piggin Cc: Andrew Morton Cc: Greg KH Signed-off-by: Linus Torvalds --- mm/mmap.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index d3fa10a726cf..c581df14d0de 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end); validate_mm(mm); } +/* Flags that can be inherited from an existing mapping when merging */ +#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR) + /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. @@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end); static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags) { - if (vma->vm_flags != vm_flags) + if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS) return 0; if (vma->vm_file != file) return 0; -- cgit v1.2.2 From fc8744adc870a8d4366908221508bb113d8b72ee Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 31 Jan 2009 15:08:56 -0800 Subject: Stop playing silly games with the VM_ACCOUNT flag The mmap_region() code would temporarily set the VM_ACCOUNT flag for anonymous shared mappings just to inform shmem_zero_setup() that it should enable accounting for the resulting shm object. It would then clear the flag after calling ->mmap (for the /dev/zero case) or doing shmem_zero_setup() (for the MAP_ANON case). This not only resulted in vma merge issues, but also made for unnecessary confusion. Use the already-existing VM_NORESERVE flag for this instead, and let shmem_{zero|file}_setup() just figure it out from that. This also happens to make it obvious that the new DRI2 GEM layer uses a non-reserving backing store for its object allocation - which is quite possibly not intentional. But since I didn't want to change semantics in this patch, I left it alone, and just updated the caller to use the new flag semantics. Signed-off-by: Linus Torvalds --- mm/mmap.c | 48 +++++++++++++++++++++++++----------------------- mm/shmem.c | 2 +- 2 files changed, 26 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index c581df14d0de..214b6a258eeb 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1090,6 +1090,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma) mapping_cap_account_dirty(vma->vm_file->f_mapping); } +/* + * We account for memory if it's a private writeable mapping, + * and VM_NORESERVE wasn't set. + */ +static inline int accountable_mapping(unsigned int vm_flags) +{ + return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; +} + unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, unsigned int vm_flags, unsigned long pgoff, @@ -1117,23 +1126,24 @@ munmap_back: if (!may_expand_vm(mm, len >> PAGE_SHIFT)) return -ENOMEM; - if (flags & MAP_NORESERVE) + /* + * Set 'VM_NORESERVE' if we should not account for the + * memory use of this mapping. We only honor MAP_NORESERVE + * if we're allowed to overcommit memory. + */ + if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) + vm_flags |= VM_NORESERVE; + if (!accountable) vm_flags |= VM_NORESERVE; - if (accountable && (!(flags & MAP_NORESERVE) || - sysctl_overcommit_memory == OVERCOMMIT_NEVER)) { - if (vm_flags & VM_SHARED) { - /* Check memory availability in shmem_file_setup?
*/ - vm_flags |= VM_ACCOUNT; - } else if (vm_flags & VM_WRITE) { - /* - * Private writable mapping: check memory availability - */ - charged = len >> PAGE_SHIFT; - if (security_vm_enough_memory(charged)) - return -ENOMEM; - vm_flags |= VM_ACCOUNT; - } + /* + * Private writable mapping: check memory availability + */ + if (accountable_mapping(vm_flags)) { + charged = len >> PAGE_SHIFT; + if (security_vm_enough_memory(charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; } /* @@ -1184,14 +1194,6 @@ munmap_back: goto free_vma; } - /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform - * shmem_zero_setup (perhaps called through /dev/zero's ->mmap) - * that memory reservation must be checked; but that reservation - * belongs to shared memory object, not to vma: so now clear it. - */ - if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) - vma->vm_flags &= ~VM_ACCOUNT; - /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their diff --git a/mm/shmem.c b/mm/shmem.c index 5d0de96c9789..19d566ccdeea 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2628,7 +2628,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) goto close_file; #ifdef CONFIG_SHMEM - SHMEM_I(inode)->flags = flags & VM_ACCOUNT; + SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT; #endif d_instantiate(dentry, inode); inode->i_size = size; -- cgit v1.2.2 From 27421e211a39784694b597dbf35848b88363c248 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 1 Feb 2009 11:00:16 -0800 Subject: Manually revert "mlock: downgrade mmap sem while populating mlocked regions" This essentially reverts commit 8edb08caf68184fb170f4f69c7445929e199eaea. It downgraded our mmap semaphore to a read-lock while mlocking pages, in order to allow other threads (and external accesses like "ps" et al) to walk the vma lists and take page faults etc. Which is a nice idea, but the implementation does not work. Because we cannot upgrade the lock back to a write lock without releasing the mmap semaphore, the code had to release the lock entirely and then re-take it as a writelock. However, that meant that the caller possibly lost the vma chain that it was following, since now another thread could come in and mmap/munmap the range. The code tried to work around that by just looking up the vma again and erroring out if that happened, but quite frankly, that was just a buggy hack that doesn't actually protect against anything (the other thread could just have replaced the vma with another one instead of totally unmapping it). The only way to downgrade to a read map _reliably_ is to do it at the end, which is likely the right thing to do: do all the 'vma' operations with the write-lock held, then downgrade to a read after completing them all, and then do the "populate the newly mlocked regions" while holding just the read lock. And then just drop the read-lock and return to user space. The (perhaps somewhat simpler) alternative is to just make all the callers of mlock_vma_pages_range() know that the mmap lock got dropped, and just re-grab the mmap semaphore if it needs to mlock more than one vma region. So we can do this "downgrade mmap sem while populating mlocked regions" thing right, but the way it was done here was absolutely not correct. Thus the revert, in the expectation that we will do it all correctly some day. 
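For reference, the "downgrade at the end" scheme described above would look roughly like this (a hedged outline built on the existing rwsem primitives, not a tested patch):

	down_write(&mm->mmap_sem);
	/* ... all vma creation, merging and flag updates under the write lock ... */
	downgrade_write(&mm->mmap_sem);	/* atomic write->read transition: no unlocked window */
	/* ... fault in the newly mlocked ranges while holding only the read lock ... */
	up_read(&mm->mmap_sem);
	/* no attempt to go back to write mode: rwsems cannot be upgraded */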
Cc: Lee Schermerhorn Cc: Rik van Riel Cc: Andrew Morton Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- mm/mlock.c | 47 ++--------------------------------------------- 1 file changed, 2 insertions(+), 45 deletions(-) (limited to 'mm') diff --git a/mm/mlock.c b/mm/mlock.c index 2904a347e476..028ec482fdd4 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -294,14 +294,10 @@ static inline int __mlock_posix_error_return(long retval) * * return number of pages [> 0] to be removed from locked_vm on success * of "special" vmas. - * - * return negative error if vma spanning @start-@range disappears while - * mmap semaphore is dropped. Unlikely? */ long mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - struct mm_struct *mm = vma->vm_mm; int nr_pages = (end - start) / PAGE_SIZE; BUG_ON(!(vma->vm_flags & VM_LOCKED)); @@ -314,20 +310,8 @@ long mlock_vma_pages_range(struct vm_area_struct *vma, if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))) { - long error; - downgrade_write(&mm->mmap_sem); - - error = __mlock_vma_pages_range(vma, start, end, 1); - up_read(&mm->mmap_sem); - /* vma can change or disappear */ - down_write(&mm->mmap_sem); - vma = find_vma(mm, start); - /* non-NULL vma must contain @start, but need to check @end */ - if (!vma || end > vma->vm_end) - return -ENOMEM; - - return 0; /* hide other errors from mmap(), et al */ + return __mlock_vma_pages_range(vma, start, end, 1); } /* @@ -438,41 +422,14 @@ success: vma->vm_flags = newflags; if (lock) { - /* - * mmap_sem is currently held for write. Downgrade the write - * lock to a read lock so that other faults, mmap scans, ... - * while we fault in all pages. - */ - downgrade_write(&mm->mmap_sem); - ret = __mlock_vma_pages_range(vma, start, end, 1); - /* - * Need to reacquire mmap sem in write mode, as our callers - * expect this. We have no support for atomically upgrading - * a sem to write, so we need to check for ranges while sem - * is unlocked. - */ - up_read(&mm->mmap_sem); - /* vma can change or disappear */ - down_write(&mm->mmap_sem); - *prev = find_vma(mm, start); - /* non-NULL *prev must contain @start, but need to check @end */ - if (!(*prev) || end > (*prev)->vm_end) - ret = -ENOMEM; - else if (ret > 0) { + if (ret > 0) { mm->locked_vm -= ret; ret = 0; } else ret = __mlock_posix_error_return(ret); /* translate if needed */ } else { - /* - * TODO: for unlocking, pages will already be resident, so - * we don't need to wait for allocations/reclaim/pagein, ... - * However, unlocking a very large region can still take a - * while. Should we downgrade the semaphore for both lock - * AND unlock ? - */ __mlock_vma_pages_range(vma, start, end, 0); } -- cgit v1.2.2 From dcf6a79dda5cc2a2bec183e50d829030c0972aaa Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 2 Feb 2009 18:33:49 +0200 Subject: write-back: fix nr_to_write counter Commit 05fe478dd04e02fa230c305ab9b5616669821dd3 introduced some @wbc->nr_to_write breakage. It made the following changes: 1. Decrement wbc->nr_to_write instead of nr_to_write 2. Decrement wbc->nr_to_write _only_ if wbc->sync_mode == WB_SYNC_NONE 3. If synced nr_to_write pages, stop only if wbc->sync_mode == WB_SYNC_NONE, otherwise keep going. However, according to the commit message, the intention was to only make change 3. Change 1 is a bug. Change 2 does not seem to be necessary, and it breaks UBIFS expectations, so if needed, it should be done separately later.
Moreover, change 2 does not seem to be documented in the commit message. This patch does the following: 1. Undo changes 1 and 2 2. Add a comment explaining change 3 (it is very useful to have comments in _code_, not only in the commit). Signed-off-by: Artem Bityutskiy Acked-by: Nick Piggin Cc: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b493db7841dc..dc32dae01e5f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1051,13 +1051,22 @@ continue_unlock: } } - if (wbc->sync_mode == WB_SYNC_NONE) { - wbc->nr_to_write--; - if (wbc->nr_to_write <= 0) { - done = 1; - break; - } + if (nr_to_write > 0) + nr_to_write--; + else if (wbc->sync_mode == WB_SYNC_NONE) { + /* + * We stop writing back only if we are not + * doing integrity sync. In case of integrity + * sync we have to keep going because someone + * may be concurrently dirtying pages, and we + * might have synced a lot of newly appeared + * dirty pages, but have not synced all of the + * old dirty pages. + */ + done = 1; + break; } + if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; done = 1; -- cgit v1.2.2 From ab92661d5d9514647346047f30f67a7f35ffea67 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Wed, 4 Feb 2009 15:12:16 -0800 Subject: do_wp_page: fix regression with execute in place Fix do_wp_page for VM_MIXEDMAP mappings. In the case where pfn_valid returns 0 for a pfn at the beginning of do_wp_page and the mapping is not shared writable, the code branches to label `gotten:' with old_page == NULL. In case the vma is locked (vma->vm_flags & VM_LOCKED), lock_page, clear_page_mlock, and unlock_page try to access the old_page. This patch checks whether old_page is valid before it is dereferenced. The regression was introduced by "mlock: mlocked pages are unevictable" (commit b291f000393f5a0b679012b39d79fbc85c018233). Signed-off-by: Carsten Otte Cc: Nick Piggin Cc: Heiko Carstens Cc: [2.6.28.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 22bfa7a47a0b..baa999e87cd2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1999,7 +1999,7 @@ gotten: * Don't let another task, with possibly unlocked vma, * keep the mlocked page. */ - if (vma->vm_flags & VM_LOCKED) { + if ((vma->vm_flags & VM_LOCKED) && old_page) { lock_page(old_page); /* for LRU manipulation */ clear_page_mlock(old_page); unlock_page(old_page); -- cgit v1.2.2
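The do_wp_page fix above relies on C's left-to-right short-circuit evaluation: the pointer is tested in the same condition that gates the dereference. A standalone toy version (names and flag values are illustrative, not the kernel's):

#include <stdio.h>

#define VM_LOCKED 0x2000UL	/* illustrative value only */

struct page { int mlocked; };

/* Stand-in for the lock_page/clear_page_mlock/unlock_page sequence. */
static void clear_mlock_if_locked(unsigned long vm_flags, struct page *old_page)
{
	if ((vm_flags & VM_LOCKED) && old_page)	/* pointer tested before any use */
		old_page->mlocked = 0;
}

int main(void)
{
	clear_mlock_if_locked(VM_LOCKED, NULL);	/* safe: && short-circuits on NULL */
	printf("ok\n");
	return 0;
}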