Diffstat (limited to 'mm')
-rw-r--r--   mm/mempolicy.c    10
-rw-r--r--   mm/mempool.c       9
-rw-r--r--   mm/mmap.c         17
-rw-r--r--   mm/truncate.c     11
-rw-r--r--   mm/vmstat.c      151
5 files changed, 158 insertions(+), 40 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e07e27e846a2..a9963ceddd65 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1176,7 +1176,15 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 	if (vma) {
 		unsigned long off;
 
-		off = vma->vm_pgoff;
+		/*
+		 * for small pages, there is no difference between
+		 * shift and PAGE_SHIFT, so the bit-shift is safe.
+		 * for huge pages, since vm_pgoff is in units of small
+		 * pages, we need to shift off the always 0 bits to get
+		 * a useful offset.
+		 */
+		BUG_ON(shift < PAGE_SHIFT);
+		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
 		off += (addr - vma->vm_start) >> shift;
 		return offset_il_node(pol, vma, off);
 	} else
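The hunk above makes interleave_nid() scale vm_pgoff, which is always in small-page units, down to huge-page granularity before adding the in-VMA offset; previously off = vma->vm_pgoff mixed small-page units with huge-page units and skewed node selection for interleaved huge-page mappings. A minimal user-space sketch of the arithmetic, assuming 4 KB base pages and 2 MB huge pages (the helper name and constants are illustrative, not from the patch):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                           /* 4 KB base pages */

/* Hypothetical stand-in for the interleave offset computation above. */
static unsigned long interleave_off(unsigned long vm_pgoff,
                                    unsigned long addr,
                                    unsigned long vm_start,
                                    unsigned long shift)
{
        unsigned long off;

        assert(shift >= PAGE_SHIFT);            /* mirrors the BUG_ON() */
        /* vm_pgoff counts small pages; drop the always-0 low bits */
        off = vm_pgoff >> (shift - PAGE_SHIFT);
        off += (addr - vm_start) >> shift;
        return off;
}

int main(void)
{
        /* 2 MB huge pages (shift = 21), mapping offset by 512 small pages */
        printf("%lu\n", interleave_off(512, 0x40200000UL, 0x40000000UL, 21));
        return 0;
}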
diff --git a/mm/mempool.c b/mm/mempool.c
index fe6e05289cc5..ccd8cb8cd41f 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -238,8 +238,13 @@ repeat_alloc:
 	init_wait(&wait);
 	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
 	smp_mb();
-	if (!pool->curr_nr)
-		io_schedule();
+	if (!pool->curr_nr) {
+		/*
+		 * FIXME: this should be io_schedule(). The timeout is there
+		 * as a workaround for some DM problems in 2.6.18.
+		 */
+		io_schedule_timeout(5*HZ);
+	}
 	finish_wait(&pool->wait, &wait);
 
 	goto repeat_alloc;
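The bounded sleep caps how long a lost wakeup can stall the allocator: after at most five seconds the task wakes anyway and retries through repeat_alloc. A user-space analogue of the same pattern, sketched with POSIX condition variables (all names here are illustrative):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int curr_nr;                     /* elements available in the pool */

/* Wait for an element, but never longer than five seconds; the caller
 * then retries the allocation, like the goto repeat_alloc above. */
static void wait_for_element(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 5;                 /* the 5*HZ equivalent */

        pthread_mutex_lock(&lock);
        if (!curr_nr)
                pthread_cond_timedwait(&cond, &lock, &ts);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        wait_for_element();             /* returns after 5 s if nothing signals */
        return 0;
}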
diff --git a/mm/mmap.c b/mm/mmap.c
index c1868ecdbc5f..e66a0b524aff 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)	(0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -913,6 +917,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE)
@@ -1859,6 +1867,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	unsigned long flags;
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
+	int error;
 
 	len = PAGE_ALIGN(len);
 	if (!len)
@@ -1867,6 +1876,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
+	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/*
 	 * mlock MCL_FUTURE?
 	 */
@@ -1907,8 +1922,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (security_vm_enough_memory(len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
 	/* Can we just expand an old private anonymous mapping? */
 	if (vma_merge(mm, prev, addr, addr + len, flags,
 			NULL, NULL, pgoff, NULL))
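Taken together, the mmap.c hunks introduce an arch_mmap_check() hook that defaults to a no-op and runs early in both do_mmap_pgoff() and do_brk(); in do_brk() the flags computation moves above the check so the hook sees the final flags. A hypothetical architecture override, purely for illustration (the reserved-region cutoff is invented, not from the patch):

/* In some asm-level header, defined before mm/mmap.c sees the #ifndef
 * default: reject MAP_FIXED requests below an arch-reserved boundary.
 * ARCH_LOW_LIMIT and the whole policy are illustrative only. */
#define ARCH_LOW_LIMIT	0x10000UL

#define arch_mmap_check(addr, len, flags)				\
	((((flags) & MAP_FIXED) && ((addr) < ARCH_LOW_LIMIT))		\
		? -EINVAL : 0)

Because the default is supplied under #ifndef, any architecture that defines the macro first transparently replaces the (0) no-op.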
diff --git a/mm/truncate.c b/mm/truncate.c
index cf1b015df4a7..c6ab55ec6883 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -68,10 +68,10 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 		return 0;
 
 	write_lock_irq(&mapping->tree_lock);
-	if (PageDirty(page)) {
-		write_unlock_irq(&mapping->tree_lock);
-		return 0;
-	}
+	if (PageDirty(page))
+		goto failed;
+	if (page_count(page) != 2)	/* caller's ref + pagecache ref */
+		goto failed;
 
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
@@ -79,6 +79,9 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
+failed:
+	write_unlock_irq(&mapping->tree_lock);
+	return 0;
 }
 
 /**
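invalidate_complete_page() now also refuses pages with an unexpected reference count (anything other than the caller's ref plus the pagecache ref), and both failure paths funnel through a single 'failed' label so the tree_lock is dropped exactly once. The same single-unlock idiom in a self-contained user-space sketch, with a pthread mutex standing in for tree_lock (names are illustrative):

#include <pthread.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every bail-out goes through one label, so the unlock can never be
 * missed or doubled, no matter how many checks are added later. */
static int try_remove(int dirty, int refcount)
{
        pthread_mutex_lock(&tree_lock);
        if (dirty)
                goto failed;
        if (refcount != 2)              /* caller's ref + cache ref, as above */
                goto failed;
        /* ... removal work happens here, still under the lock ... */
        pthread_mutex_unlock(&tree_lock);
        return 1;
failed:
        pthread_mutex_unlock(&tree_lock);
        return 0;
}

int main(void)
{
        return !try_remove(0, 2);       /* clean page, exactly 2 refs: succeeds */
}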
diff --git a/mm/vmstat.c b/mm/vmstat.c
index dfdf24133901..c1b5f4106b38 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -12,6 +12,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free, struct pglist_data *pgdat)
@@ -114,17 +115,72 @@ EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
 
-#define STAT_THRESHOLD 32
+static int calculate_threshold(struct zone *zone)
+{
+	int threshold;
+	int mem;	/* memory in 128 MB units */
+
+	/*
+	 * The threshold scales with the number of processors and the amount
+	 * of memory per zone. More memory means that we can defer updates for
+	 * longer, more processors could lead to more contention.
+	 * fls() is used to have a cheap way of logarithmic scaling.
+	 *
+	 * Some sample thresholds:
+	 *
+	 * Threshold   Processors   (fls)   Zonesize    fls(mem+1)
+	 * ------------------------------------------------------------------
+	 * 8           1            1       0.9-1 GB    4
+	 * 16          2            2       0.9-1 GB    4
+	 * 20          2            2       1-2 GB      5
+	 * 24          2            2       2-4 GB      6
+	 * 28          2            2       4-8 GB      7
+	 * 32          2            2       8-16 GB     8
+	 * 4           2            2       <128M       1
+	 * 30          4            3       2-4 GB      5
+	 * 48          4            3       8-16 GB     8
+	 * 32          8            4       1-2 GB      4
+	 * 32          8            4       0.9-1 GB    4
+	 * 10          16           5       <128M       1
+	 * 40          16           5       900M        4
+	 * 70          64           7       2-4 GB      5
+	 * 84          64           7       4-8 GB      6
+	 * 108         512          9       4-8 GB      6
+	 * 125         1024         10      8-16 GB     8
+	 * 125         1024         10      16-32 GB    9
+	 */
+
+	mem = zone->present_pages >> (27 - PAGE_SHIFT);
+
+	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
+
+	/*
+	 * Maximum threshold is 125
+	 */
+	threshold = min(125, threshold);
+
+	return threshold;
+}
 
 /*
- * Determine pointer to currently valid differential byte given a zone and
- * the item number.
- *
- * Preemption must be off
+ * Refresh the thresholds for each zone.
  */
-static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
+static void refresh_zone_stat_thresholds(void)
 {
-	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
+	struct zone *zone;
+	int cpu;
+	int threshold;
+
+	for_each_zone(zone) {
+
+		if (!zone->present_pages)
+			continue;
+
+		threshold = calculate_threshold(zone);
+
+		for_each_online_cpu(cpu)
+			zone_pcp(zone, cpu)->stat_threshold = threshold;
+	}
 }
 
 /*
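To make the scaling concrete, the formula can be rerun in user space for one of the sample rows; fls() below is a portable stand-in for the kernel helper (position of the highest set bit, with fls(0) == 0), and the 4 KB page size is an assumption:

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): highest set bit, 1-based. */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        int page_shift = 12;                              /* assume 4 KB pages */
        unsigned long long pages = (8ULL << 30) >> page_shift; /* 8 GB zone */
        int mem = pages >> (27 - page_shift);             /* 128 MB units -> 64 */
        int threshold = 2 * fls(4) * (1 + fls(mem));      /* 4 online CPUs */

        if (threshold > 125)                              /* the min(125, ...) cap */
                threshold = 125;
        printf("threshold = %d\n", threshold);            /* prints 48 */
        return 0;
}

The result matches the "48 / 4 processors / 8-16 GB" row of the table above.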
@@ -133,17 +189,16 @@ static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	s8 *p;
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 	long x;
 
-	p = diff_pointer(zone, item);
 	x = delta + *p;
 
-	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
+	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
-
 	*p = x;
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
@@ -172,10 +227,12 @@ EXPORT_SYMBOL(mod_zone_page_state);
  * No overflow check is necessary and therefore the differential can be
  * incremented or decremented in place which may allow the compilers to
  * generate better code.
- *
  * The increment or decrement is known and therefore one boundary check can
  * be omitted.
  *
+ * NOTE: These functions are very performance sensitive. Change only
+ * with care.
+ *
  * Some processors have inc/dec instructions that are atomic vs an interrupt.
  * However, the code must first determine the differential location in a zone
  * based on the processor number and then inc/dec the counter. There is no
@@ -185,13 +242,16 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)++;
 
-	if (unlikely(*p > STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
+	if (unlikely(*p > pcp->stat_threshold)) {
+		int overstep = pcp->stat_threshold / 2;
+
+		zone_page_state_add(*p + overstep, zone, item);
+		*p = -overstep;
 	}
 }
 
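The overstep trick folds the accumulated delta plus half the threshold into the global counter and restarts the per-CPU differential at -threshold/2, so a counter that only ever moves in one direction goes half again as long between global updates as a plain reset to zero would allow. A plain-C model of that behaviour (the threshold value is arbitrary):

#include <stdio.h>

static long global_count;               /* stands in for the zone counter */
static signed char diff;                /* per-CPU differential (s8) */
static const int threshold = 32;        /* arbitrary example threshold */

/* Minimal model of __inc_zone_state()'s overstep behaviour. */
static void inc(void)
{
        diff++;
        if (diff > threshold) {
                int overstep = threshold / 2;

                /* fold the delta plus a half-threshold bias ... */
                global_count += diff + overstep;
                /* ... and start the next window below zero */
                diff = -overstep;
        }
}

int main(void)
{
        for (int i = 0; i < 1000; i++)
                inc();
        printf("global=%ld diff=%d\n", global_count, diff);
        return 0;
}

The invariant is that global_count + diff always equals the number of increments; only the fold frequency changes.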
@@ -204,13 +264,16 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	struct zone *zone = page_zone(page);
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)--;
 
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
+	if (unlikely(*p < - pcp->stat_threshold)) {
+		int overstep = pcp->stat_threshold / 2;
+
+		zone_page_state_add(*p - overstep, zone, item);
+		*p = overstep;
 	}
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
@@ -239,19 +302,9 @@ EXPORT_SYMBOL(inc_zone_page_state);
 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
-	struct zone *zone;
-	s8 *p;
 
-	zone = page_zone(page);
 	local_irq_save(flags);
-	p = diff_pointer(zone, item);
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
+	__dec_zone_page_state(page, item);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(dec_zone_page_state);
@@ -525,6 +578,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
 			   pageset->pcp[j].high,
 			   pageset->pcp[j].batch);
 		}
+#ifdef CONFIG_SMP
+		seq_printf(m, "\n  vm stats threshold: %d",
+				pageset->stat_threshold);
+#endif
 	}
 	seq_printf(m,
 		   "\n  all_unreclaimable: %u"
@@ -613,3 +670,35 @@ struct seq_operations vmstat_op = {
 
 #endif /* CONFIG_PROC_FS */
 
+#ifdef CONFIG_SMP
+/*
+ * Use the cpu notifier to insure that the thresholds are recalculated
+ * when necessary.
+ */
+static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+		unsigned long action,
+		void *hcpu)
+{
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		refresh_zone_stat_thresholds();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata vmstat_notifier =
+	{ &vmstat_cpuup_callback, NULL, 0 };
+
+int __init setup_vmstat(void)
+{
+	refresh_zone_stat_thresholds();
+	register_cpu_notifier(&vmstat_notifier);
+	return 0;
+}
+module_init(setup_vmstat)
+#endif