author		Theodore Ts'o <tytso@mit.edu>	2010-11-08 13:45:33 -0500
committer	Theodore Ts'o <tytso@mit.edu>	2010-11-08 13:45:33 -0500
commit		83668e7141c7a0aa4035bde94344b81f9cf966ab (patch)
tree		34d9fd52470b475d6e9d88ece8ca1ba80bf85a42 /fs
parent		f7ad6d2e9201a6e1c9ee6530a291452eb695feb8 (diff)
ext4: fix potential race when freeing ext4_io_page structures
Use an atomic_t and make sure we don't free the structure while we
might still be submitting I/O for that page.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
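The patch follows the usual reference-counting pattern: the creator holds an initial reference (atomic_set to 1), each in-flight I/O takes another with atomic_inc(), and whoever performs the final atomic_dec_and_test() does the teardown. The minimal userspace sketch below is not part of the commit; it models the kernel's atomic_t with C11 stdatomic, and names such as io_page_t are illustrative only.

/*
 * Minimal userspace sketch of the refcount pattern the patch adopts:
 * the creator starts the count at 1, every "submitted I/O" takes an
 * extra reference, and the caller of the final decrement frees the
 * structure.  Kernel atomic_t is modelled with C11 stdatomic.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	atomic_int p_count;		/* plays the role of atomic_t p_count */
} io_page_t;

static io_page_t *io_page_alloc(void)
{
	io_page_t *p = malloc(sizeof(*p));
	atomic_init(&p->p_count, 1);	/* creator's reference */
	return p;
}

static void io_page_get(io_page_t *p)
{
	atomic_fetch_add(&p->p_count, 1);	/* one ref per in-flight I/O */
}

static void io_page_put(io_page_t *p)
{
	/* fetch_sub returns the old value; the last dropper frees */
	if (atomic_fetch_sub(&p->p_count, 1) == 1) {
		printf("freeing io_page\n");
		free(p);
	}
}

int main(void)
{
	io_page_t *p = io_page_alloc();

	io_page_get(p);		/* "submit" an I/O */
	io_page_put(p);		/* I/O completion drops its reference */
	io_page_put(p);		/* creator drops the last reference: frees */
	return 0;
}

With a plain int, a completion running concurrently with the submitter could both see the count hit zero and free the structure twice, or free it while the other path still touches it; atomic_dec_and_test guarantees exactly one caller observes the transition to zero.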
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext4/ext4.h		 2
-rw-r--r--	fs/ext4/page-io.c	38
2 files changed, 16 insertions(+), 24 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 670d1343f914..6a5edea2d70b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@ struct mpage_da_data {
 
 struct ext4_io_page {
 	struct page	*p_page;
-	int		p_count;
+	atomic_t	p_count;
 };
 
 #define MAX_IO_PAGES	128
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index a24c8cca7370..7f5451cd1d38 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -67,6 +67,15 @@ void ext4_ioend_wait(struct inode *inode)
 	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
 }
 
+static void put_io_page(struct ext4_io_page *io_page)
+{
+	if (atomic_dec_and_test(&io_page->p_count)) {
+		end_page_writeback(io_page->p_page);
+		put_page(io_page->p_page);
+		kmem_cache_free(io_page_cachep, io_page);
+	}
+}
+
 void ext4_free_io_end(ext4_io_end_t *io)
 {
 	int i;
@@ -75,15 +84,8 @@ void ext4_free_io_end(ext4_io_end_t *io)
 	BUG_ON(!io);
 	if (io->page)
 		put_page(io->page);
-	for (i = 0; i < io->num_io_pages; i++) {
-		if (--io->pages[i]->p_count == 0) {
-			struct page *page = io->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io->pages[i]);
-		}
-	}
+	for (i = 0; i < io->num_io_pages; i++)
+		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
 	wq = to_ioend_wq(io->inode);
 	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
@@ -235,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
 		} while (bh != head);
 	}
 
-	if (--io_end->pages[i]->p_count == 0) {
-		struct page *page = io_end->pages[i]->p_page;
-
-		end_page_writeback(page);
-		put_page(page);
-		kmem_cache_free(io_page_cachep, io_end->pages[i]);
-	}
+	put_io_page(io_end->pages[i]);
 
 	/*
 	 * If this is a partial write which happened to make
@@ -369,7 +365,7 @@ submit_and_retry:
 	if ((io_end->num_io_pages == 0) ||
 	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
 		io_end->pages[io_end->num_io_pages++] = io_page;
-		io_page->p_count++;
+		atomic_inc(&io_page->p_count);
 	}
 	return 0;
 }
@@ -398,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		return -ENOMEM;
 	}
 	io_page->p_page = page;
-	io_page->p_count = 0;
+	atomic_set(&io_page->p_count, 1);
 	get_page(page);
 
 	for (bh = head = page_buffers(page), block_start = 0;
@@ -430,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	 * PageWriteback bit from the page to prevent the system from
 	 * wedging later on.
 	 */
-	if (io_page->p_count == 0) {
-		put_page(page);
-		end_page_writeback(page);
-		kmem_cache_free(io_page_cachep, io_page);
-	}
+	put_io_page(io_page);
 	return ret;
 }