Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap_xip.c      4
-rw-r--r--  mm/memory.c          11
-rw-r--r--  mm/mempolicy.c        4
-rw-r--r--  mm/mmap.c             7
-rw-r--r--  mm/mremap.c           1
-rw-r--r--  mm/page-writeback.c  41
-rw-r--r--  mm/page_alloc.c       3
-rw-r--r--  mm/truncate.c        22
8 files changed, 55 insertions(+), 38 deletions(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 45b3553865cf..9dd9fbb75139 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
                 address = vma->vm_start +
                         ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-                page = ZERO_PAGE(address);
+                page = ZERO_PAGE(0);
                 pte = page_check_address(page, mm, address, &ptl);
                 if (pte) {
                         /* Nuke the page table entry. */
@@ -246,7 +246,7 @@ xip_file_nopage(struct vm_area_struct * area,
                 __xip_unmap(mapping, pgoff);
         } else {
                 /* not shared and writable, use ZERO_PAGE() */
-                page = ZERO_PAGE(address);
+                page = ZERO_PAGE(0);
         }
 
 out:
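
Note on the ZERO_PAGE() change above: as the comment removed from mm/mremap.c further down puts it, ZERO_PAGE() can depend on the virtual address on some architectures, i.e. there may be several "coloured" zero pages rather than one global empty_zero_page. The following standalone sketch (illustrative constants and names only, not kernel code) models the difference between an address-keyed lookup and the single page that ZERO_PAGE(0) always selects.

/*
 * Illustrative model only -- constants and names are invented, this is not
 * kernel code.  Some architectures keep several zero pages "coloured" by
 * virtual address; ZERO_PAGE(addr) may then return different pages for
 * different addresses, while ZERO_PAGE(0) always names the same one.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE   4096UL
#define ZERO_PAGE_COLOURS 8UL                         /* assumed colour count */
#define ZERO_PAGE_MASK    ((ZERO_PAGE_COLOURS - 1) * MODEL_PAGE_SIZE)

static char empty_zero_pages[ZERO_PAGE_COLOURS][MODEL_PAGE_SIZE];

/* address-keyed lookup: the colour depends on the faulting virtual address */
static void *zero_page_coloured(unsigned long vaddr)
{
        return &empty_zero_pages[0][0] + (vaddr & ZERO_PAGE_MASK);
}

/* what ZERO_PAGE(0) selects: always the one canonical page */
static void *zero_page_global(void)
{
        return &empty_zero_pages[0][0];
}

int main(void)
{
        unsigned long vaddr = 0x7000;   /* arbitrary user address */

        printf("coloured: %p  global: %p\n",
               zero_page_coloured(vaddr), zero_page_global());
        return 0;
}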
diff --git a/mm/memory.c b/mm/memory.c
index af227d26e104..ef09f0acb1d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2606,8 +2606,15 @@ static int __init gate_vma_init(void)
         gate_vma.vm_mm = NULL;
         gate_vma.vm_start = FIXADDR_USER_START;
         gate_vma.vm_end = FIXADDR_USER_END;
-        gate_vma.vm_page_prot = PAGE_READONLY;
-        gate_vma.vm_flags = 0;
+        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+        gate_vma.vm_page_prot = __P101;
+        /*
+         * Make sure the vDSO gets into every core dump.
+         * Dumping its contents makes post-mortem fully interpretable later
+         * without matching up the same kernel and hardware config to see
+         * what PC values meant.
+         */
+        gate_vma.vm_flags |= VM_ALWAYSDUMP;
         return 0;
 }
 __initcall(gate_vma_init);
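
The new comment in gate_vma_init() explains the intent of VM_ALWAYSDUMP: the gate/vDSO mapping should always land in core dumps so post-mortem analysis does not depend on having the matching kernel and hardware configuration. As a hedged sketch of how a dump decision could honour such a flag (the maydump() name, struct, and flag bits below are illustrative stand-ins, not the kernel's fs/binfmt_elf.c):

/*
 * Hedged sketch, not the kernel's fs/binfmt_elf.c: how a core-dump policy
 * could honour an "always dump" flag such as the VM_ALWAYSDUMP set above.
 * The flag bits, struct, and function names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_READ        0x00000001UL
#define VM_IO          0x00004000UL
#define VM_ALWAYSDUMP  0x04000000UL   /* assumed bit for the example */

struct vma_stub {
        unsigned long vm_flags;
};

static bool maydump(const struct vma_stub *vma)
{
        if (vma->vm_flags & VM_ALWAYSDUMP)      /* e.g. the gate/vDSO VMA */
                return true;
        if (vma->vm_flags & VM_IO)              /* never dump device mappings */
                return false;
        return (vma->vm_flags & VM_READ) != 0;  /* otherwise dump readable VMAs */
}

int main(void)
{
        struct vma_stub gate = { .vm_flags = VM_READ | VM_ALWAYSDUMP };

        printf("dump gate vma: %d\n", maydump(&gate));
        return 0;
}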
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index da9463946556..c2aec0e1090d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -884,6 +884,10 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
         err = get_nodes(&nodes, nmask, maxnode);
         if (err)
                 return err;
+#ifdef CONFIG_CPUSETS
+        /* Restrict the nodes to the allowed nodes in the cpuset */
+        nodes_and(nodes, nodes, current->mems_allowed);
+#endif
         return do_mbind(start, len, mode, &nodes, flags);
 }
 
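
The nodes_and() call added above intersects the user-supplied nodemask with the calling task's cpuset-allowed nodes before do_mbind() ever sees it. A minimal userspace model of that intersection, with a plain bitmask standing in for nodemask_t and made-up node numbers:

/*
 * Userspace model of the nodes_and() effect above: a plain bitmask stands
 * in for nodemask_t, and the node numbers are invented for the example.
 */
#include <stdio.h>

int main(void)
{
        unsigned long requested    = 0x0fUL;  /* mbind() asked for nodes 0-3 */
        unsigned long mems_allowed = 0x06UL;  /* cpuset permits nodes 1-2    */

        /* nodes_and(nodes, nodes, current->mems_allowed) reduces to this */
        unsigned long effective = requested & mems_allowed;

        printf("effective nodemask: 0x%lx\n", effective);   /* 0x6 */
        return 0;
}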
diff --git a/mm/mmap.c b/mm/mmap.c
index 9717337293c3..cc3a20819457 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1477,6 +1477,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 {
         struct mm_struct *mm = vma->vm_mm;
         struct rlimit *rlim = current->signal->rlim;
+        unsigned long new_start;
 
         /* address space limit tests */
         if (!may_expand_vm(mm, grow))
@@ -1496,6 +1497,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                 return -ENOMEM;
         }
 
+        /* Check to ensure the stack will not grow into a hugetlb-only region */
+        new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+                        vma->vm_end - size;
+        if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+                return -EFAULT;
+
         /*
          * Overcommit.. This must be the final test, as it will
          * update security statistics.
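
The new check computes new_start so that the range [new_start, new_start + size) always spans the whole grown stack VMA: an upward-growing stack keeps its start address, a downward-growing one keeps its end. A small standalone illustration of that computation (the struct and helper names are invented for the example; "size" is the prospective total VMA size, as in acct_stack_growth()):

/*
 * Standalone illustration of the new_start computation above; not kernel
 * code.  "size" is the prospective total size of the stack VMA, so the
 * range [new_start, new_start + size) always covers the grown mapping.
 */
#include <stdio.h>

#define VM_GROWSUP 0x1UL   /* illustrative flag bit */

struct vma_stub {
        unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long grown_start(const struct vma_stub *vma, unsigned long size)
{
        /* growing up keeps the start address; growing down keeps the end */
        return (vma->vm_flags & VM_GROWSUP) ? vma->vm_start
                                            : vma->vm_end - size;
}

int main(void)
{
        /* a 4 KiB downward-growing stack at [0x7000, 0x8000) grown to 8 KiB */
        struct vma_stub down = { .vm_start = 0x7000, .vm_end = 0x8000, .vm_flags = 0 };

        printf("new_start = 0x%lx\n", grown_start(&down, 0x2000));   /* 0x6000 */
        return 0;
}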
diff --git a/mm/mremap.c b/mm/mremap.c
index 9c769fa29f32..5d4bd4f95b8e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -105,7 +105,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 if (pte_none(*old_pte))
                         continue;
                 pte = ptep_clear_flush(vma, old_addr, old_pte);
-                /* ZERO_PAGE can be dependant on virtual addr */
                 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                 set_pte_at(mm, new_addr, new_pte, pte);
         }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1d2fc89ca56d..be0efbde4994 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -133,11 +133,9 @@ get_dirty_limits(long *pbackground, long *pdirty,
 
 #ifdef CONFIG_HIGHMEM
         /*
-         * If this mapping can only allocate from low memory,
-         * we exclude high memory from our count.
+         * We always exclude high memory from our count.
          */
-        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
-                available_memory -= totalhigh_pages;
+        available_memory -= totalhigh_pages;
 #endif
 
 
@@ -526,28 +524,25 @@ static struct notifier_block __cpuinitdata ratelimit_nb = {
 };
 
 /*
- * If the machine has a large highmem:lowmem ratio then scale back the default
- * dirty memory thresholds: allowing too much dirty highmem pins an excessive
- * number of buffer_heads.
+ * Called early on to tune the page writeback dirty limits.
+ *
+ * We used to scale dirty pages according to how total memory
+ * related to pages that could be allocated for buffers (by
+ * comparing nr_free_buffer_pages() to vm_total_pages.
+ *
+ * However, that was when we used "dirty_ratio" to scale with
+ * all memory, and we don't do that any more. "dirty_ratio"
+ * is now applied to total non-HIGHPAGE memory (by subtracting
+ * totalhigh_pages from vm_total_pages), and as such we can't
+ * get into the old insane situation any more where we had
+ * large amounts of dirty pages compared to a small amount of
+ * non-HIGHMEM memory.
+ *
+ * But we might still want to scale the dirty_ratio by how
+ * much memory the box has..
  */
 void __init page_writeback_init(void)
 {
-        long buffer_pages = nr_free_buffer_pages();
-        long correction;
-
-        correction = (100 * 4 * buffer_pages) / vm_total_pages;
-
-        if (correction < 100) {
-                dirty_background_ratio *= correction;
-                dirty_background_ratio /= 100;
-                vm_dirty_ratio *= correction;
-                vm_dirty_ratio /= 100;
-
-                if (dirty_background_ratio <= 0)
-                        dirty_background_ratio = 1;
-                if (vm_dirty_ratio <= 0)
-                        vm_dirty_ratio = 1;
-        }
         mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
         writeback_set_ratelimit();
         register_cpu_notifier(&ratelimit_nb);
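
With high memory now always excluded, the dirty thresholds become straightforward percentages of low memory, which is why the boot-time correction in page_writeback_init() could be dropped. A sketch of the arithmetic with assumed machine sizes and assumed default ratios; this is not the exact get_dirty_limits() code:

/*
 * Sketch of the dirty-limit arithmetic with assumed machine sizes and
 * assumed default ratios; not the exact get_dirty_limits() code.
 */
#include <stdio.h>

int main(void)
{
        long total_pages            = 1L << 20;  /* assume 4 GiB of 4 KiB pages   */
        long totalhigh_pages        = 3L << 18;  /* assume 3 GiB of it is highmem */
        int  dirty_background_ratio = 10;        /* assumed default               */
        int  vm_dirty_ratio         = 40;        /* assumed default               */

        /* CONFIG_HIGHMEM: high memory is now always excluded from the base */
        long available_memory = total_pages - totalhigh_pages;

        long background = dirty_background_ratio * available_memory / 100;
        long dirty      = vm_dirty_ratio * available_memory / 100;

        printf("background: %ld pages, dirty: %ld pages\n", background, dirty);
        return 0;
}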
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc5b5442e942..2c606cc922a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -989,8 +989,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                       int classzone_idx, int alloc_flags)
 {
         /* free_pages my go negative - that's OK */
-        unsigned long min = mark;
-        long free_pages = z->free_pages - (1 << order) + 1;
+        long min = mark, free_pages = z->free_pages - (1 << order) + 1;
         int o;
 
         if (alloc_flags & ALLOC_HIGH)
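
Why 'min' had to become signed: free_pages may legitimately go negative (as the context comment says), and comparing a signed value against an unsigned watermark converts the negative value to a huge positive one, so the test misbehaves. A self-contained demonstration of that conversion pitfall (only the variable names mirror the hunk above; nothing else about the kernel is assumed):

/*
 * Self-contained demonstration of the signed/unsigned comparison pitfall
 * the hunk above avoids by declaring both values as long.
 */
#include <stdio.h>

int main(void)
{
        long free_pages = -5;       /* free_pages may go negative - that's OK    */
        unsigned long umin = 128;   /* old declaration: unsigned long min        */
        long          smin = 128;   /* new declaration: long min                 */

        /*
         * With an unsigned watermark, free_pages is converted to a huge
         * unsigned value before the comparison, so the check passes when it
         * should fail (most compilers warn about exactly this).
         */
        printf("unsigned min: free_pages >= min is %d\n", free_pages >= umin);

        /* with both operands signed, the comparison does what was intended */
        printf("signed   min: free_pages >= min is %d\n", free_pages >= smin);
        return 0;
}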
diff --git a/mm/truncate.c b/mm/truncate.c
index 6c79ca4a1ca7..5df947de7654 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,15 +51,22 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
                 do_invalidatepage(page, partial);
 }
 
+/*
+ * This cancels just the dirty bit on the kernel page itself, it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Tought you could.
+ */
 void cancel_dirty_page(struct page *page, unsigned int account_size)
 {
-        /* If we're cancelling the page, it had better not be mapped any more */
-        if (page_mapped(page)) {
-                static unsigned int warncount;
-
-                WARN_ON(++warncount < 5);
-        }
-
         if (TestClearPageDirty(page)) {
                 struct address_space *mapping = page->mapping;
                 if (mapping && mapping_cap_account_dirty(mapping)) {
@@ -422,7 +429,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                 pagevec_release(&pvec);
                 cond_resched();
         }
-        WARN_ON_ONCE(ret);
         return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
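
The new comment above cancel_dirty_page() stresses that the helper only clears the page-level dirty bit and rolls back the dirty accounting; it does not touch page tables or the dirty tag on the mapping. A userspace model of just the accounting side (counter names and sizes are stand-ins, not the kernel's zone statistics):

/*
 * Userspace model of only the accounting side of cancel_dirty_page();
 * counter names and sizes are stand-ins, not the kernel's zone statistics.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096U

static unsigned long nr_file_dirty;     /* stand-in for the dirty page counter */

static void account_page_dirtied_model(void)
{
        nr_file_dirty++;                /* page became dirty, counted once */
}

static void cancel_dirty_page_model(unsigned int account_size)
{
        /* models TestClearPageDirty() succeeding, then the accounting fixup */
        if (account_size)
                nr_file_dirty--;
}

int main(void)
{
        account_page_dirtied_model();
        cancel_dirty_page_model(MODEL_PAGE_SIZE);
        printf("dirty pages accounted: %lu\n", nr_file_dirty);   /* 0 */
        return 0;
}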