author		Carsten Otte <cotte@de.ibm.com>	2007-03-29 04:20:39 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-03-29 11:22:26 -0400
commit		a76c0b976310bbb1b6eaecaaae465af194134477 (patch)
tree		4f23a06244f7cb6689bd7ded80843c481968a283
parent		90ed52ebe48181d3c5427b3bd1d24f659e7575ad (diff)
[PATCH] mm: fix xip issue with /dev/zero
Fix a bug where reading into an xip mapping from /dev/zero fills the user page table with ZERO_PAGE() entries. Later on, xip cannot tell which pages have been ZERO_PAGE()-filled by access to a sparse mapping and which ones originate from /dev/zero, so it will unmap ZERO_PAGE from all mappings when filling the sparse hole with data. xip now uses its own zeroed page for its sparse mappings. Please apply.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
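For context, here is a minimal userspace sketch of the scenario the changelog describes. The path, mapping size and flags are hypothetical and only illustrate a read(2) from /dev/zero into a mapped region of a sparse xip file; nothing here is taken from the patch itself.

/*
 * Illustrative sketch only (not part of the patch): trigger the scenario
 * the changelog describes. Assumes /mnt/xip/sparse.dat is a sparse file
 * on an xip-capable filesystem; path, size and flags are hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xip/sparse.dat", O_RDWR);
	int zfd = open("/dev/zero", O_RDONLY);
	char *map;

	if (fd < 0 || zfd < 0) {
		perror("open");
		return 1;
	}

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Per the changelog: reading from /dev/zero into the xip mapping
	 * fills the page table with ZERO_PAGE() entries, which the old
	 * code could not tell apart from the ZERO_PAGE() entries it had
	 * installed itself for sparse holes.
	 */
	if (read(zfd, map, 4096) < 0)
		perror("read");

	munmap(map, 4096);
	close(zfd);
	close(fd);
	return 0;
}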
-rw-r--r--	mm/filemap_xip.c	48
1 file changed, 37 insertions, 11 deletions
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 9dd9fbb75139..cbb335813ec0 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -17,6 +17,29 @@
 #include "filemap.h"
 
 /*
+ * We do use our own empty page to avoid interference with other users
+ * of ZERO_PAGE(), such as /dev/zero
+ */
+static struct page *__xip_sparse_page;
+
+static struct page *xip_sparse_page(void)
+{
+	if (!__xip_sparse_page) {
+		unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
+		if (zeroes) {
+			static DEFINE_SPINLOCK(xip_alloc_lock);
+			spin_lock(&xip_alloc_lock);
+			if (!__xip_sparse_page)
+				__xip_sparse_page = virt_to_page(zeroes);
+			else
+				free_page(zeroes);
+			spin_unlock(&xip_alloc_lock);
+		}
+	}
+	return __xip_sparse_page;
+}
+
+/*
  * This is a file read routine for execute in place files, and uses
  * the mapping->a_ops->get_xip_page() function for the actual low-level
  * stuff.
@@ -162,7 +185,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
+ * __xip_sparse_page when found at pgoff.
  */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -177,13 +200,16 @@ __xip_unmap (struct address_space * mapping,
 	spinlock_t *ptl;
 	struct page *page;
 
+	page = __xip_sparse_page;
+	if (!page)
+		return;
+
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		mm = vma->vm_mm;
 		address = vma->vm_start +
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-		page = ZERO_PAGE(0);
 		pte = page_check_address(page, mm, address, &ptl);
 		if (pte) {
 			/* Nuke the page table entry. */
@@ -222,16 +248,14 @@ xip_file_nopage(struct vm_area_struct * area,
 		+ area->vm_pgoff;
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff >= size) {
-		return NULL;
-	}
+	if (pgoff >= size)
+		return NOPAGE_SIGBUS;
 
 	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
-	if (!IS_ERR(page)) {
+	if (!IS_ERR(page))
 		goto out;
-	}
 	if (PTR_ERR(page) != -ENODATA)
-		return NULL;
+		return NOPAGE_SIGBUS;
 
 	/* sparse block */
 	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
@@ -241,12 +265,14 @@ xip_file_nopage(struct vm_area_struct * area,
 		page = mapping->a_ops->get_xip_page (mapping,
 			pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
-			return NULL;
+			return NOPAGE_SIGBUS;
 		/* unmap page at pgoff from all other vmas */
 		__xip_unmap(mapping, pgoff);
 	} else {
-		/* not shared and writable, use ZERO_PAGE() */
-		page = ZERO_PAGE(0);
+		/* not shared and writable, use xip_sparse_page() */
+		page = xip_sparse_page();
+		if (!page)
+			return NOPAGE_OOM;
 	}
 
 out:
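For reference, the xip_sparse_page() helper added in the first hunk follows a common lazily-initialized-singleton pattern: an unlocked fast-path check, an allocation, then a locked re-check that either installs the page or frees the losing allocation. A rough userspace analogue (a pthread mutex standing in for the kernel spinlock; all names below are made up) might look like this:

#include <pthread.h>
#include <stdlib.h>

/*
 * Userspace stand-in for the lazily allocated singleton used by
 * xip_sparse_page() above: check without the lock, allocate, then take
 * the lock, re-check, and either install the new buffer or free it if
 * another thread won the race.
 */
static void *shared_zero_buf;
static pthread_mutex_t zero_buf_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_shared_zero_buf(size_t size)
{
	if (!shared_zero_buf) {
		void *candidate = calloc(1, size);	/* like get_zeroed_page() */

		if (candidate) {
			pthread_mutex_lock(&zero_buf_lock);
			if (!shared_zero_buf)
				shared_zero_buf = candidate;
			else
				free(candidate);	/* lost the race */
			pthread_mutex_unlock(&zero_buf_lock);
		}
	}
	return shared_zero_buf;
}

int main(void)
{
	return get_shared_zero_buf(4096) ? 0 : 1;
}

As in the patch, only the installation is serialized: a caller that allocates but loses the re-check frees its buffer, and the installed page is never freed afterwards. (A strictly conforming userspace version would access the shared pointer with C11 atomics; the simple form above just mirrors the structure of the kernel code.)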