Diffstat (limited to 'mm')

-rw-r--r--	mm/filemap_xip.c	23
-rw-r--r--	mm/madvise.c	13
-rw-r--r--	mm/memory.c	25
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/page_alloc.c	4

5 files changed, 32 insertions(+), 35 deletions(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 4553b2c5aab4..8c199f537732 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
 		if (unlikely(IS_ERR(page))) {
 			if (PTR_ERR(page) == -ENODATA) {
 				/* sparse */
-				page = virt_to_page(empty_zero_page);
+				page = ZERO_PAGE(0);
 			} else {
 				desc->error = PTR_ERR(page);
 				goto out;
 			}
-		} else
-			BUG_ON(!PageUptodate(page));
+		}
 
 		/* If users can be writing to this page using arbitrary
 		 * virtual addresses, take care about potential aliasing
@@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
 			flush_dcache_page(page);
 
 		/*
-		 * Ok, we have the page, and it's up-to-date, so
-		 * now we can copy it to user space...
+		 * Ok, we have the page, so now we can copy it to user space...
 		 *
 		 * The actor routine returns how many bytes were actually used..
 		 * NOTE! This may not be the same as how much of a user buffer
@@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
  */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -187,7 +185,7 @@ __xip_unmap (struct address_space * mapping,
 		 * We need the page_table_lock to protect us from page faults,
 		 * munmap, fork, etc...
 		 */
-		pte = page_check_address(virt_to_page(empty_zero_page), mm,
+		pte = page_check_address(ZERO_PAGE(address), mm,
 					 address);
 		if (!IS_ERR(pte)) {
 			/* Nuke the page table entry. */
@@ -230,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area,
 
 	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
 	if (!IS_ERR(page)) {
-		BUG_ON(!PageUptodate(page));
 		return page;
 	}
 	if (PTR_ERR(page) != -ENODATA)
@@ -245,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area,
 			pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
 			return NULL;
-		BUG_ON(!PageUptodate(page));
 		/* unmap page at pgoff from all other vmas */
 		__xip_unmap(mapping, pgoff);
 	} else {
-		/* not shared and writable, use empty_zero_page */
-		page = virt_to_page(empty_zero_page);
+		/* not shared and writable, use ZERO_PAGE() */
+		page = ZERO_PAGE(address);
 	}
 
 	return page;
@@ -319,8 +315,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 		}
 
-		BUG_ON(!PageUptodate(page));
-
 		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
@@ -435,8 +429,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 			return 0;
 		else
 			return PTR_ERR(page);
-	} else
-		BUG_ON(!PageUptodate(page));
+	}
 	kaddr = kmap_atomic(page, KM_USER0);
 	memset(kaddr + offset, 0, length);
 	kunmap_atomic(kaddr, KM_USER0);
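
The filemap_xip.c changes replace direct virt_to_page(empty_zero_page) lookups with the ZERO_PAGE() macro (which on some architectures selects a zero page by virtual address to avoid cache aliasing) and drop the BUG_ON(!PageUptodate(page)) assertions. The sparse-hole (-ENODATA) branch relies on the same kernel-wide zero page that backs untouched anonymous memory; a minimal user-space sketch of that semantic (illustration only, not part of this patch):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* A never-written anonymous mapping is backed by the shared zero
	 * page, just as the XIP read path above hands out ZERO_PAGE(0)
	 * for sparse (-ENODATA) file holes. */
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char zeros[4096] = {0};

	if (p == MAP_FAILED)
		return 1;
	printf("hole reads as zeros: %s\n",
	       memcmp(p, zeros, len) == 0 ? "yes" : "no");
	munmap(p, len);
	return 0;
}
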
diff --git a/mm/madvise.c b/mm/madvise.c
index 73180a22877e..c8c01a12fea4 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,9 +83,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
 {
 	struct file *file = vma->vm_file;
 
-	if (!file)
-		return -EBADF;
-
 	if (file->f_mapping->a_ops->get_xip_page) {
 		/* no bad return value, but ignore advice */
 		return 0;
@@ -140,11 +137,16 @@ static long madvise_dontneed(struct vm_area_struct * vma,
 	return 0;
 }
 
-static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
-			unsigned long start, unsigned long end, int behavior)
+static long
+madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+		unsigned long start, unsigned long end, int behavior)
 {
+	struct file *filp = vma->vm_file;
 	long error = -EBADF;
 
+	if (!filp)
+		goto out;
+
 	switch (behavior) {
 	case MADV_NORMAL:
 	case MADV_SEQUENTIAL:
@@ -165,6 +167,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev
 		break;
 	}
 
+out:
 	return error;
 }
 
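Hoisting the !file test out of madvise_willneed() and into madvise_vma() means every advice value, not just MADV_WILLNEED, now fails with -EBADF on a vma that has no backing file. A small user-space probe of that behavior on a kernel carrying this change (a sketch; other kernel versions return different results for anonymous mappings):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Anonymous mapping: vma->vm_file is NULL in the kernel. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (madvise(p, 4096, MADV_WILLNEED) == -1)
		printf("madvise: %s\n", strerror(errno)); /* EBADF expected here */
	else
		printf("madvise succeeded\n");
	munmap(p, 4096);
	return 0;
}
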
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa6254..6fe77acbc1cd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+		int read, int write, int accessed)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -818,9 +818,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
 		pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
-			if (write && !pte_dirty(pte) && !PageDirty(page))
-				set_page_dirty(page);
-			mark_page_accessed(page);
+			if (accessed) {
+				if (write && !pte_dirty(pte) && !PageDirty(page))
+					set_page_dirty(page);
+				mark_page_accessed(page);
+			}
 			return page;
 		}
 	}
@@ -829,16 +831,19 @@ out:
 	return NULL;
 }
 
-struct page *
+inline struct page *
 follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
-	return __follow_page(mm, address, /*read*/0, write);
+	return __follow_page(mm, address, 0, write, 1);
 }
 
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
-	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+	return __follow_page(mm, address, 1, 0, 0) != NULL;
 }
 EXPORT_SYMBOL(check_user_page_readable);
 
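The new fifth argument to __follow_page() exists because set_page_dirty() and mark_page_accessed() may take locks that are not interrupt-safe, while oprofile can invoke check_user_page_readable() from interrupt context; passing accessed == 0 skips both calls. A hypothetical caller illustrating the split (kernel-internal sketch against this tree's API; profile_sample() is an invented name):

/* Hypothetical profiler hook (invented name, illustration only): it can
 * run in interrupt context, so it must use check_user_page_readable(),
 * whose accessed == 0 path skips set_page_dirty()/mark_page_accessed()
 * and the non-irq-safe locks they may take. Ordinary lookups keep using
 * follow_page(), which passes accessed == 1. */
static void profile_sample(struct mm_struct *mm, unsigned long pc)
{
	if (!check_user_page_readable(mm, pc))
		return;	/* user pc not mapped readable; drop the sample */
	/* ... record the sample ... */
}
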
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cb41c31e7c87..1694845526be 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1138,11 +1138,11 @@ void mpol_free_shared_policy(struct shared_policy *p)
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
+		rb_erase(&n->nd, &p->root);
 		mpol_free(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
 	spin_unlock(&p->lock);
-	p->root = RB_ROOT;
 }
 
 /* assumes fs == KERNEL_DS */
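The mempolicy.c fix erases each sp_node from the tree as it is freed, rather than freeing every node first and resetting the root afterwards, so the rbtree never transiently points at freed memory. Restated on its own, the teardown idiom the hunk converges on (same calls as in the hunk above, comments added):

	struct rb_node *next = rb_first(&p->root);

	while (next) {
		struct sp_node *n = rb_entry(next, struct sp_node, nd);

		next = rb_next(&n->nd);		/* fetch successor first... */
		rb_erase(&n->nd, &p->root);	/* ...then unlink the node... */
		mpol_free(n->policy);		/* ...and only now free it */
		kmem_cache_free(sn_cache, n);
	}
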
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d6ba6a4b594..42bccfb8464d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1861,7 +1861,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long i, j;
-	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 	int cpu, nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
@@ -1934,9 +1933,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_mem_map = pfn_to_page(zone_start_pfn);
 		zone->zone_start_pfn = zone_start_pfn;
 
-		if ((zone_start_pfn) & (zone_required_alignment-1))
-			printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
-
 		memmap_init(size, nid, j, zone_start_pfn);
 
 		zonetable_add(zone, nid, j, zone_start_pfn, size);
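
The deleted page_alloc.c check warned when a zone's first pfn was not aligned to the largest buddy-allocator block, 1UL << (MAX_ORDER-1) pages. A stand-alone illustration of that alignment test (MAX_ORDER of 11 is the common default here, an assumption; the real constant is config-dependent):

#include <stdio.h>

#define MAX_ORDER 11	/* assumed default; kernel configs vary */

int main(void)
{
	unsigned long zone_start_pfn = 0x12345;	/* example pfn */
	unsigned long mask = (1UL << (MAX_ORDER - 1)) - 1;

	if (zone_start_pfn & mask)
		printf("pfn %#lx not aligned to a MAX_ORDER block\n",
		       zone_start_pfn);
	else
		printf("pfn %#lx is MAX_ORDER-aligned\n", zone_start_pfn);
	return 0;
}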