Diffstat (limited to 'mm')

 mm/madvise.c    | 13 ++++++++-----
 mm/memory.c     | 25 +++++++++++++++----------
 mm/mempolicy.c  |  2 +-
 mm/page_alloc.c |  4 ----
 4 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 73180a22877e..c8c01a12fea4 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,9 +83,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
 {
 	struct file *file = vma->vm_file;
 
-	if (!file)
-		return -EBADF;
-
 	if (file->f_mapping->a_ops->get_xip_page) {
 		/* no bad return value, but ignore advice */
 		return 0;
@@ -140,11 +137,16 @@ static long madvise_dontneed(struct vm_area_struct * vma,
 	return 0;
 }
 
-static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
-			unsigned long start, unsigned long end, int behavior)
+static long
+madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+		unsigned long start, unsigned long end, int behavior)
 {
+	struct file *filp = vma->vm_file;
 	long error = -EBADF;
 
+	if (!filp)
+		goto out;
+
 	switch (behavior) {
 	case MADV_NORMAL:
 	case MADV_SEQUENTIAL:
@@ -165,6 +167,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev
 		break;
 	}
 
+out:
 	return error;
 }
 
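Note: taken together, the madvise.c hunks hoist the NULL-file check out of madvise_willneed() and into madvise_vma(), so a mapping with no backing file (vma->vm_file == NULL) now takes the default -EBADF path before the behavior switch runs. A minimal userspace sketch of the visible effect for MADV_WILLNEED (illustrative only, not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	/* Anonymous mapping: the kernel-side vma->vm_file is NULL. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* With no backing file, madvise_vma() takes the -EBADF path. */
	if (madvise(p, len, MADV_WILLNEED) == -1)
		printf("madvise: %s\n", strerror(errno)); /* expect EBADF */

	munmap(p, len);
	return 0;
}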
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa6254..6fe77acbc1cd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+			int read, int write, int accessed)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -818,9 +818,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (write && !pte_dirty(pte) && !PageDirty(page))
-			set_page_dirty(page);
-		mark_page_accessed(page);
+		if (accessed) {
+			if (write && !pte_dirty(pte) && !PageDirty(page))
+				set_page_dirty(page);
+			mark_page_accessed(page);
+		}
 		return page;
 	}
 }
@@ -829,16 +831,19 @@ out:
 	return NULL;
 }
 
-struct page *
+inline struct page *
 follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
-	return __follow_page(mm, address, /*read*/0, write);
+	return __follow_page(mm, address, 0, write, 1);
 }
 
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
-	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+	return __follow_page(mm, address, 1, 0, 0) != NULL;
 }
 EXPORT_SYMBOL(check_user_page_readable);
 
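Note: the memory.c change threads a new accessed flag through __follow_page(). follow_page() keeps the old behaviour (accessed = 1), while check_user_page_readable() passes accessed = 0 so the dirty/accessed bookkeeping, which can take locks that are not IRQ-safe, is skipped when oprofile calls it from interrupt context. A standalone sketch of the same pattern (all names illustrative, not kernel API):

#include <stdbool.h>
#include <stddef.h>

struct demo_page { bool dirty; int age; };

/* Lookup helper with an opt-out flag for its side effects, mirroring
 * the accessed parameter added to __follow_page(). */
static struct demo_page *lookup(struct demo_page *p, bool write, bool accessed)
{
	if (!p)
		return NULL;
	if (accessed) {		/* bookkeeping only when the caller's
				 * context makes it safe */
		if (write && !p->dirty)
			p->dirty = true;
		p->age++;
	}
	return p;
}

/* Normal path: update dirty/reference state as before. */
static struct demo_page *follow(struct demo_page *p, bool write)
{
	return lookup(p, write, true);
}

/* Interrupt-context probe: existence check only, no side effects. */
static bool readable(struct demo_page *p)
{
	return lookup(p, false, false) != NULL;
}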
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cb41c31e7c87..1694845526be 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1138,11 +1138,11 @@ void mpol_free_shared_policy(struct shared_policy *p)
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
+		rb_erase(&n->nd, &p->root);
 		mpol_free(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
 	spin_unlock(&p->lock);
-	p->root = RB_ROOT;
 }
 
 /* assumes fs == KERNEL_DS */
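Note: the mempolicy.c fix changes the teardown order in mpol_free_shared_policy(). Each sp_node is now unlinked with rb_erase() before it is freed, instead of freeing every node and only resetting p->root afterwards, so the tree never points at freed memory during the walk. A userspace analogue of the unlink-before-free pattern (hypothetical names, not the kernel rbtree API):

#include <stdlib.h>

struct node { struct node *next; };

static void destroy_all(struct node **head)
{
	struct node *n = *head;
	while (n) {
		struct node *next = n->next;	/* fetch successor first */
		*head = next;			/* unlink before freeing */
		free(n);
		n = next;
	}
}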
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d6ba6a4b594..42bccfb8464d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1861,7 +1861,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long i, j;
-	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 	int cpu, nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
@@ -1934,9 +1933,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_mem_map = pfn_to_page(zone_start_pfn);
 		zone->zone_start_pfn = zone_start_pfn;
 
-		if ((zone_start_pfn) & (zone_required_alignment-1))
-			printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
-
 		memmap_init(size, nid, j, zone_start_pfn);
 
 		zonetable_add(zone, nid, j, zone_start_pfn, size);
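Note: the page_alloc.c hunks simply delete the zone-start alignment warning; the check itself was pure arithmetic on the zone's first pfn. For reference, the removed test in isolation (the MAX_ORDER value is assumed here, as it is configuration-dependent):

#include <stdio.h>

#define MAX_ORDER 11	/* assumed for illustration */

int main(void)
{
	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER - 1);
	unsigned long zone_start_pfn = 0x12345;	/* example value */

	if (zone_start_pfn & (zone_required_alignment - 1))
		printf("pfn %#lx not aligned to %#lx\n",
		       zone_start_pfn, zone_required_alignment);
	return 0;
}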