| author | Alan Cox <alan@redhat.com> | 2008-05-23 16:04:31 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-05-24 12:56:09 -0400 |
| commit | 80119ef5c8153e0a6cc5edf00c083dc98a9bd348 (patch) | |
| tree | a2af11a3991b1bebe9d764ab6d4d28891e86eda2 | |
| parent | 6c7c6afbb8c0e60d32a563cae7c6889211e9d9d8 (diff) | |
mm: fix atomic_t overflow in vm
The atomic_t type is 32-bit, but a 64-bit system can have more than 2^32
pages of virtual address space available. Without this change we overflow on
ludicrously large mappings.
Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
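To see why the counter type matters, here is a minimal user-space sketch (not kernel code; the names `pages`, `committed32` and `committed64` are made up for illustration) that mimics accounting more than 2^32 pages in a 32-bit counter versus a 64-bit `long` counter, assuming an LP64 system:

```c
/*
 * Illustrative user-space sketch (not kernel code) of the overflow the
 * patch fixes: accounting more than 2^32 pages in a 32-bit counter.
 * Assumes an LP64 system where long is 64 bits wide.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 5 * 2^32 pages of 4 KiB each is ~80 TiB - huge, but legal on 64-bit */
	int64_t pages = 5LL << 32;

	int  committed32 = 0;	/* stand-in for the old atomic_t counter      */
	long committed64 = 0;	/* stand-in for the new atomic_long_t counter */

	/* Converting an out-of-range value to int is implementation-defined;
	 * common compilers keep only the low 32 bits, i.e. the count wraps. */
	committed32 += (int)pages;
	committed64 += pages;

	printf("32-bit counter: %d pages\n", committed32);	/* prints 0           */
	printf("64-bit counter: %ld pages\n", committed64);	/* prints 21474836480 */
	return 0;
}
```

With 4 KiB pages, 2^32 pages is only 16 TiB of address space, which a single mapping can exceed on 64-bit hardware, hence the switch from atomic_t to atomic_long_t below.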
-rw-r--r--  fs/proc/proc_misc.c  | 2 +-
-rw-r--r--  include/linux/mman.h | 4 ++--
-rw-r--r--  mm/mmap.c            | 4 ++--
-rw-r--r--  mm/nommu.c           | 4 ++--
-rw-r--r--  mm/swap.c            | 4 ++--
5 files changed, 9 insertions, 9 deletions
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 74a323d2b850..32dc14cd8900 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -139,7 +139,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	si_meminfo(&i);
 	si_swapinfo(&i);
-	committed = atomic_read(&vm_committed_space);
+	committed = atomic_long_read(&vm_committed_space);
 	allowed = ((totalram_pages - hugetlb_total_pages())
 		* sysctl_overcommit_ratio / 100) + total_swap_pages;
 
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 87920a0852a3..dab8892e6ff1 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -17,14 +17,14 @@
 
 extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
-extern atomic_t vm_committed_space;
+extern atomic_long_t vm_committed_space;
 
 #ifdef CONFIG_SMP
 extern void vm_acct_memory(long pages);
 #else
 static inline void vm_acct_memory(long pages)
 {
-	atomic_add(pages, &vm_committed_space);
+	atomic_long_add(pages, &vm_committed_space);
 }
 #endif
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
diff --git a/mm/nommu.c b/mm/nommu.c
index ef8c62cec697..dca93fcb8b7a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -39,7 +39,7 @@ struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
 unsigned long askedalloc, realalloc;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
@@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 * cast `allowed' as a signed long because vm_committed_space
 	 * sometimes has a negative value
 	 */
-	if (atomic_read(&vm_committed_space) < (long)allowed)
+	if (atomic_long_read(&vm_committed_space) < (long)allowed)
 		return 0;
 error:
 	vm_unacct_memory(pages);
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
 	local = &__get_cpu_var(committed_space);
 	*local += pages;
 	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-		atomic_add(*local, &vm_committed_space);
+		atomic_long_add(*local, &vm_committed_space);
 		*local = 0;
 	}
 	preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 
 	committed = &per_cpu(committed_space, (long)hcpu);
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		atomic_add(*committed, &vm_committed_space);
+		atomic_long_add(*committed, &vm_committed_space);
 		*committed = 0;
 		drain_cpu_pagevecs((long)hcpu);
 	}