Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         |  7
-rw-r--r--  mm/memory.c          | 26
-rw-r--r--  mm/memory_hotplug.c  | 86
-rw-r--r--  mm/mprotect.c        | 11
-rw-r--r--  mm/page_alloc.c      |  3
-rw-r--r--  mm/pdflush.c         |  4
-rw-r--r--  mm/slub.c            |  2
-rw-r--r--  mm/vmstat.c          |  2
8 files changed, 94 insertions(+), 47 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 239d36163bbe..1e6a7d34874f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1461,6 +1461,11 @@ page_not_uptodate:
 	 */
 	ClearPageError(page);
 	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page))
+			error = -EIO;
+	}
 	page_cache_release(page);
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
@@ -1655,7 +1660,7 @@ int should_remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
 	struct iattr newattrs;
 
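An aside on the filemap.c hunk above: after issuing ->readpage(), the fault slow path now waits for the read to finish and turns a page that is still not up to date into -EIO. A minimal userspace sketch of that shape, with hypothetical stand-ins (read_page, wait_for_page, page_uptodate) for the kernel helpers:

#include <errno.h>
#include <stdio.h>

struct fake_page { int uptodate; };

/* Stand-in for ->readpage(): starts the read; here it completes at once. */
static int read_page(struct fake_page *p) { p->uptodate = 1; return 0; }
/* Stand-in for wait_on_page_locked(): block until the I/O has completed. */
static void wait_for_page(struct fake_page *p) { (void)p; }
static int page_uptodate(const struct fake_page *p) { return p->uptodate; }

static int fault_slow_path(struct fake_page *page)
{
	int error = read_page(page);

	if (!error) {
		wait_for_page(page);
		if (!page_uptodate(page))	/* read returned, page still bad */
			error = -EIO;
	}
	return error;
}

int main(void)
{
	struct fake_page page = { 0 };
	printf("fault_slow_path() = %d\n", fault_slow_path(&page));
	return 0;
}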
diff --git a/mm/memory.c b/mm/memory.c
index bbab1e37055e..fb5608a120ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	/*
+	 * Ensure all pte setup (eg. pte page lock and page clearing) are
+	 * visible before the pte is made visible to other CPUs by being
+	 * put into page tables.
+	 *
+	 * The other side of the story is the pointer chasing in the page
+	 * table walking code (when walking the page table without locking;
+	 * ie. most of the time). Fortunately, these data accesses consist
+	 * of a chain of data-dependent loads, meaning most CPUs (alpha
+	 * being the notable exception) will already guarantee loads are
+	 * seen in-order. See the alpha page table accessors for the
+	 * smp_read_barrier_depends() barriers in page table walking code.
+	 */
+	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
 	spin_lock(&mm->page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&init_mm.page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
@@ -969,7 +986,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		goto no_page_table;
 
 	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+	if (pmd_none(*pmd))
 		goto no_page_table;
 
 	if (pmd_huge(*pmd)) {
@@ -978,6 +995,9 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		goto out;
 	}
 
+	if (unlikely(pmd_bad(*pmd)))
+		goto no_page_table;
+
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
 		goto out;
@@ -2616,6 +2636,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(mm, new);
@@ -2637,6 +2659,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud))		/* Another has populated it */
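The long comment added in __pte_alloc() explains the ordering requirement behind all four smp_wmb() calls above: the new page-table page must be fully initialized before its pointer is published, because lockless walkers reach it through dependent loads. As an illustration only, the same publish-after-initialize discipline looks like this in portable C11 atomics (a userspace analogue with made-up names, not the kernel's smp_wmb()/pmd_populate() code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int entries[4]; };

static _Atomic(struct table *) shared_table;

static void publisher(void)
{
	struct table *t = calloc(1, sizeof(*t));	/* like allocating the pte page */
	if (!t)
		return;
	t->entries[0] = 42;				/* fully initialize first */
	/* Release ordering plays the role of smp_wmb() before publication. */
	atomic_store_explicit(&shared_table, t, memory_order_release);
}

static void reader(void)
{
	/* Acquire pairs with the release above, like the dependent loads
	 * in a lock-free page-table walk. */
	struct table *t = atomic_load_explicit(&shared_table, memory_order_acquire);
	if (t)
		printf("entry 0 = %d\n", t->entries[0]);
}

int main(void)
{
	publisher();
	reader();
	return 0;
}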
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b17dca7249f8..833f854eabe5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -159,21 +159,58 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
+			   unsigned long end_pfn)
+{
+	unsigned long old_zone_end_pfn;
+
+	zone_span_writelock(zone);
+
+	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	if (start_pfn < zone->zone_start_pfn)
+		zone->zone_start_pfn = start_pfn;
+
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
+
+	zone_span_writeunlock(zone);
+}
+
+static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
+			    unsigned long end_pfn)
+{
+	unsigned long old_pgdat_end_pfn =
+		pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+	if (start_pfn < pgdat->node_start_pfn)
+		pgdat->node_start_pfn = start_pfn;
+
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
+}
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
 	int nid = pgdat->node_id;
 	int zone_type;
+	unsigned long flags;
 
 	zone_type = zone - pgdat->node_zones;
 	if (!zone->wait_table) {
-		int ret = 0;
+		int ret;
+
 		ret = init_currently_empty_zone(zone, phys_start_pfn,
 						nr_pages, MEMMAP_HOTPLUG);
-		if (ret < 0)
+		if (ret)
 			return ret;
 	}
+	pgdat_resize_lock(zone->zone_pgdat, &flags);
+	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
+	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
+			phys_start_pfn + nr_pages);
+	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 	memmap_init_zone(nr_pages, nid, zone_type,
 			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
@@ -299,36 +336,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__remove_pages);
 
-static void grow_zone_span(struct zone *zone,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_zone_end_pfn;
-
-	zone_span_writelock(zone);
-
-	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	if (start_pfn < zone->zone_start_pfn)
-		zone->zone_start_pfn = start_pfn;
-
-	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-				zone->zone_start_pfn;
-
-	zone_span_writeunlock(zone);
-}
-
-static void grow_pgdat_span(struct pglist_data *pgdat,
-		unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long old_pgdat_end_pfn =
-		pgdat->node_start_pfn + pgdat->node_spanned_pages;
-
-	if (start_pfn < pgdat->node_start_pfn)
-		pgdat->node_start_pfn = start_pfn;
-
-	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-					pgdat->node_start_pfn;
-}
-
 void online_page(struct page *page)
 {
 	totalram_pages++;
@@ -367,7 +374,6 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
-	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
 	int need_zonelists_rebuild = 0;
@@ -395,11 +401,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	 * memory_block->state_mutex.
 	 */
 	zone = page_zone(pfn_to_page(pfn));
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	grow_zone_span(zone, pfn, pfn + nr_pages);
-	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
-
 	/*
 	 * If this zone is not populated, then it is not in zonelist.
 	 * This means the page allocator ignores this zone.
@@ -408,8 +409,15 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	if (!populated_zone(zone))
 		need_zonelists_rebuild = 1;
 
-	walk_memory_resource(pfn, nr_pages, &onlined_pages,
+	ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
 		online_pages_range);
+	if (ret) {
+		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
+			nr_pages, pfn);
+		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		return ret;
+	}
+
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
 
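For reference, the grow_zone_span()/grow_pgdat_span() helpers moved above __add_zone() only ever extend a span: the start pfn can move down, and the new size is measured from the larger of the old and new end pfns. A self-contained sketch of that arithmetic with a hypothetical span struct (not the kernel types):

#include <stdio.h>

struct span { unsigned long start_pfn, spanned_pages; };

static void grow_span(struct span *s, unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_end = s->start_pfn + s->spanned_pages;

	if (start_pfn < s->start_pfn)		/* start can only move down */
		s->start_pfn = start_pfn;
	s->spanned_pages = (end_pfn > old_end ? end_pfn : old_end) - s->start_pfn;
}

int main(void)
{
	struct span zone = { .start_pfn = 0x1000, .spanned_pages = 0x1000 };

	grow_span(&zone, 0x2000, 0x2080);	/* hotplug one section above the zone */
	printf("start=%#lx spanned=%#lx\n", zone.start_pfn, zone.spanned_pages);
	return 0;	/* prints start=0x1000 spanned=0x1080 */
}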
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4de546899dc1..a5bf31c27375 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -26,6 +26,13 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#ifndef pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	return newprot;
+}
+#endif
+
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
@@ -192,7 +199,9 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = vm_get_page_prot(newflags);
+	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+					  vm_get_page_prot(newflags));
+
 	if (vma_wants_writenotify(vma)) {
 		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
 		dirty_accountable = 1;
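The mprotect.c hunks add a generic pgprot_modify() fallback behind #ifndef, so an architecture header that defines its own version takes precedence while everyone else simply adopts the new protection. A toy standalone version of that override-with-default idiom, using a made-up prot_t rather than the kernel's pgprot_t:

#include <stdio.h>

typedef struct { unsigned long bits; } prot_t;

#ifndef prot_modify
static inline prot_t prot_modify(prot_t oldprot, prot_t newprot)
{
	(void)oldprot;		/* the generic version ignores the old bits */
	return newprot;
}
#endif

int main(void)
{
	prot_t old = { .bits = 0x3 }, next = { .bits = 0x5 };

	/* With the fallback this prints 0x5; an overriding definition could
	 * instead merge selected bits from old into the result. */
	printf("%#lx\n", prot_modify(old, next).bits);
	return 0;
}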
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdd5c432c426..63835579323a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2862,8 +2862,6 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	zone->zone_start_pfn = zone_start_pfn;
 
-	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
-
 	zone_init_free_lists(zone);
 
 	return 0;
@@ -3433,6 +3431,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
 		BUG_ON(ret);
+		memmap_init(size, nid, j, zone_start_pfn);
 		zone_start_pfn += size;
 	}
 }
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 1c96cfc9e040..9d834aa4b979 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -207,7 +207,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 
 	spin_lock_irqsave(&pdflush_lock, flags);
 	if (list_empty(&pdflush_list)) {
-		spin_unlock_irqrestore(&pdflush_lock, flags);
 		ret = -1;
 	} else {
 		struct pdflush_work *pdf;
@@ -219,8 +218,9 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 		pdf->fn = fn;
 		pdf->arg0 = arg0;
 		wake_up_process(pdf->who);
-		spin_unlock_irqrestore(&pdflush_lock, flags);
 	}
+	spin_unlock_irqrestore(&pdflush_lock, flags);
+
 	return ret;
 }
 
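The pdflush.c change is purely structural: rather than unlocking separately in each branch, both branches now fall through to a single spin_unlock_irqrestore() before the return. A minimal sketch of the same shape with a pthread mutex standing in for the spinlock (illustrative names only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int list_empty_flag = 1;

static int do_operation(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (list_empty_flag) {
		ret = -1;		/* no worker to hand the job to */
	} else {
		/* ... dequeue a worker and wake it here ... */
	}
	pthread_mutex_unlock(&lock);	/* one unlock covers both branches */

	return ret;
}

int main(void)
{
	printf("do_operation() = %d\n", do_operation());
	return 0;
}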
diff --git a/mm/slub.c b/mm/slub.c
index d379b782fc83..a505a828ef41 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3762,7 +3762,7 @@ static int any_slab_objects(struct kmem_cache *s)
 		if (!n)
 			continue;
 
-		if (atomic_read(&n->total_objects))
+		if (atomic_long_read(&n->total_objects))
 			return 1;
 	}
 	return 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1a32130b958c..db9eabb2c5b3 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -41,7 +41,9 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 */
 void all_vm_events(unsigned long *ret)
 {
+	get_online_cpus();
 	sum_vm_events(ret, &cpu_online_map);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
 
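The vmstat.c hunk brackets the per-CPU summation with get_online_cpus()/put_online_cpus() so the set of online CPUs cannot change while it is being walked. A rough userspace analogue of that reader-side bracketing, using a pthread rwlock in place of the CPU-hotplug lock (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long per_cpu_events[NCPUS] = { 1, 2, 3, 4 };
static int cpu_online[NCPUS] = { 1, 1, 1, 1 };

static unsigned long all_events(void)
{
	unsigned long sum = 0;

	pthread_rwlock_rdlock(&hotplug_lock);	/* like get_online_cpus() */
	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (cpu_online[cpu])
			sum += per_cpu_events[cpu];
	pthread_rwlock_unlock(&hotplug_lock);	/* like put_online_cpus() */

	return sum;
}

int main(void)
{
	printf("sum = %lu\n", all_events());
	return 0;
}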