author     Nick Piggin <npiggin@suse.de>    2007-02-20 16:58:09 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-20 20:10:15 -0500
commit     22c8ca78f20724676b6006232bf06cc3e9299539 (patch)
tree       2e92d122733505590c5df7906ac2590477b5806b /fs/buffer.c
parent     955eff5acc8b8cd1c7d4eec0229c35eaabe013db (diff)
[PATCH] fs: fix nobh data leak
nobh_prepare_write leaks data similarly to how simple_prepare_write did. Fix
by not marking the page uptodate until nobh_commit_write time. Again, this
could break weird use-cases, but none appear to exist in the tree.

We can safely remove the set_page_dirty, because as the comment says,
nobh_commit_write does set_page_dirty. If a filesystem wants to allocate
backing store for a page dirtied via mmap, page_mkwrite is the suggested
approach.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
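A minimal sketch of the page_mkwrite approach mentioned above, assuming the
2.6.20-era hook prototype int (*page_mkwrite)(struct vm_area_struct *, struct page *);
the myfs_* names and the myfs_reserve_page() helper are hypothetical placeholders,
not part of this patch:

	/* Called when a read-only mmapped page is about to become writable. */
	static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
	{
		struct inode *inode = vma->vm_file->f_mapping->host;

		/*
		 * Reserve backing store for the whole page up front, so later
		 * writeback cannot fail for lack of space.  myfs_reserve_page()
		 * is a placeholder for the filesystem's own block allocation
		 * routine; a negative return here gives the faulter SIGBUS.
		 */
		return myfs_reserve_page(inode, page);
	}

	static struct vm_operations_struct myfs_file_vm_ops = {
		.nopage		= filemap_nopage,	/* standard read-fault path */
		.page_mkwrite	= myfs_page_mkwrite,
	};

The filesystem's ->mmap method would then install myfs_file_vm_ops on the vma
instead of relying on generic_file_mmap's default vm_ops.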
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--    fs/buffer.c    21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index e95fe4f8d11a..e8504b65176c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2248,7 +2248,6 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 	int i;
 	int ret = 0;
 	int is_mapped_to_disk = 1;
-	int dirtied_it = 0;
 
 	if (PageMappedToDisk(page))
 		return 0;
@@ -2285,14 +2284,10 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 			continue;
 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
 			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_start < from) {
+			if (block_start < from)
 				memset(kaddr+block_start, 0, from-block_start);
-				dirtied_it = 1;
-			}
-			if (block_end > to) {
+			if (block_end > to)
 				memset(kaddr + to, 0, block_end - to);
-				dirtied_it = 1;
-			}
 			flush_dcache_page(page);
 			kunmap_atomic(kaddr, KM_USER0);
 			continue;
@@ -2347,17 +2342,6 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 
 	if (is_mapped_to_disk)
 		SetPageMappedToDisk(page);
-	SetPageUptodate(page);
-
-	/*
-	 * Setting the page dirty here isn't necessary for the prepare_write
-	 * function - commit_write will do that. But if/when this function is
-	 * used within the pagefault handler to ensure that all mmapped pages
-	 * have backing space in the filesystem, we will need to dirty the page
-	 * if its contents were altered.
-	 */
-	if (dirtied_it)
-		set_page_dirty(page);
 
 	return 0;
 
@@ -2387,6 +2371,7 @@ int nobh_commit_write(struct file *file, struct page *page,
 	struct inode *inode = page->mapping->host;
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
+	SetPageUptodate(page);
 	set_page_dirty(page);
 	if (pos > inode->i_size) {
 		i_size_write(inode, pos);