author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-04-30 18:08:51 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-05-02 18:36:10 -0400
commit | 00a62ce91e554198ef28234c91c36f850f5a3bc9 (patch)
tree | 367ef134219deef91903c3fa0eb108c13658f2c7 /mm/mmap.c
parent | 0763ed2355198cdef2f6a2098e9d52eb1fe4365d (diff)
mm: fix Committed_AS underflow on large NR_CPUS environment
The Committed_AS field can underflow in certain situations:
> # while true; do cat /proc/meminfo | grep _AS; sleep 1; done | uniq -c
> 1 Committed_AS: 18446744073709323392 kB
> 11 Committed_AS: 18446744073709455488 kB
> 6 Committed_AS: 35136 kB
> 5 Committed_AS: 18446744073709454400 kB
> 7 Committed_AS: 35904 kB
> 3 Committed_AS: 18446744073709453248 kB
> 2 Committed_AS: 34752 kB
> 9 Committed_AS: 18446744073709453248 kB
> 8 Committed_AS: 34752 kB
> 3 Committed_AS: 18446744073709320960 kB
> 7 Committed_AS: 18446744073709454080 kB
> 3 Committed_AS: 18446744073709320960 kB
> 5 Committed_AS: 18446744073709454080 kB
> 6 Committed_AS: 18446744073709320960 kB
This happens because the per-cpu batching of the committed-space counter
scales with NR_CPUS, which can be greater than 1000, so the global counter
can transiently go negative, and meminfo_proc_show() does not check for
underflow before printing it.
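For reference, the huge values in the sample above are just small negative
totals printed as unsigned numbers. A minimal user-space sketch of the
wrap-around (not kernel code; the variable names are invented for
illustration):

#include <stdio.h>

int main(void)
{
	long committed_pages = -57056;                           /* small negative page count */
	unsigned long kb = (unsigned long)committed_pages * 4;  /* 4 kB pages -> kB, wraps mod 2^64 */

	printf("Committed_AS: %lu kB\n", kb);                    /* prints 18446744073709323392 kB */
	return 0;
}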
Scaling the batch with NR_CPUS is the wrong calculation anyway.  In
general, the likelihood of lock contention is proportional to the number
of online cpus, not to the theoretical maximum (NR_CPUS).
The current kernel already has a generic percpu-counter facility; using
it is the right fix.  It simplifies the code, and
percpu_counter_read_positive() never returns a negative value, so the
underflow cannot be exposed.
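A minimal sketch of the percpu_counter pattern the patch adopts (the
helper names below are invented for illustration; the real accounting
helpers live outside the mm/mmap.c hunks shown here):

#include <linux/percpu_counter.h>

static struct percpu_counter demo_committed;	/* illustrative counter */

static int __init demo_counter_init(void)
{
	/* allocates the per-cpu storage; returns -ENOMEM on failure */
	return percpu_counter_init(&demo_committed, 0);
}

static void demo_acct(long pages)
{
	/* cheap per-cpu update, folded into the global sum in batches */
	percpu_counter_add(&demo_committed, pages);
}

static void demo_unacct(long pages)
{
	percpu_counter_sub(&demo_committed, pages);
}

static s64 demo_committed_read(void)
{
	/*
	 * The batched sum can be transiently negative; _read_positive()
	 * clamps it at 0, so readers such as /proc/meminfo never report
	 * a wrapped-around value.
	 */
	return percpu_counter_read_positive(&demo_committed);
}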
Reported-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Eric B Munson <ebmunson@us.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: <stable@kernel.org> [All kernel versions]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
+struct percpu_counter vm_committed_as;
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -179,11 +179,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	if (mm)
 		allowed -= mm->total_vm / 32;
 
-	/*
-	 * cast `allowed' as a signed long because vm_committed_space
-	 * sometimes has a negative value
-	 */
-	if (atomic_long_read(&vm_committed_space) < (long)allowed)
+	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
@@ -2481,4 +2477,8 @@ void mm_drop_all_locks(struct mm_struct *mm)
  */
 void __init mmap_init(void)
 {
+	int ret;
+
+	ret = percpu_counter_init(&vm_committed_as, 0);
+	VM_BUG_ON(ret);
 }