-rw-r--r--	fs/ext4/ext4.h              |   4
-rw-r--r--	fs/ext4/inode.c             |   3
-rw-r--r--	fs/ext4/mballoc.c           |   2
-rw-r--r--	fs/ext4/page-io.c           |  97
-rw-r--r--	fs/ext4/super.c             | 102
-rw-r--r--	include/trace/events/ext4.h |  97
6 files changed, 214 insertions(+), 91 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8b5dd6369f82..6a5edea2d70b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@ struct mpage_da_data {
 
 struct ext4_io_page {
 	struct page	*p_page;
-	int		p_count;
+	atomic_t	p_count;
 };
 
 #define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@ struct ext4_inode_info {
 	spinlock_t i_completed_io_lock;
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
 extern void ext4_exit_pageio(void);
+extern void ext4_ioend_wait(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern int ext4_end_io_nolock(ext4_io_end_t *io);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4d78342f3bf0..bdbe69902207 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -53,6 +53,7 @@
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 					      loff_t new_size)
 {
+	trace_ext4_begin_ordered_truncate(inode, new_size);
 	return jbd2_journal_begin_ordered_truncate(
 					EXT4_SB(inode->i_sb)->s_journal,
 					&EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@ void ext4_evict_inode(struct inode *inode)
 	handle_t *handle;
 	int err;
 
+	trace_ext4_evict_inode(inode);
 	if (inode->i_nlink) {
 		truncate_inode_pages(&inode->i_data, 0);
 		goto no_delete;
@@ -5647,6 +5649,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 	int err, ret;
 
 	might_sleep();
+	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (ext4_handle_valid(handle) &&
 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c58eba34724a..5b4d4e3a4d58 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4640,8 +4640,6 @@ do_more:
 	 * with group lock held. generate_buddy look at
 	 * them with group lock_held
 	 */
-	if (test_opt(sb, DISCARD))
-		ext4_issue_discard(sb, block_group, bit, count);
 	ext4_lock_group(sb, block_group);
 	mb_clear_bits(bitmap_bh->b_data, bit, count);
 	mb_free_blocks(inode, &e4b, bit, count);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 46a7d6a9d976..7f5451cd1d38 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,8 +32,14 @@
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
+#define WQ_HASH_SZ 37
+#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
+static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
+
 int __init ext4_init_pageio(void)
 {
+	int i;
+
 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
@@ -42,6 +48,8 @@ int __init ext4_init_pageio(void)
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
+	for (i = 0; i < WQ_HASH_SZ; i++)
+		init_waitqueue_head(&ioend_wq[i]);
 
 	return 0;
 }
@@ -52,24 +60,37 @@ void ext4_exit_pageio(void)
 	kmem_cache_destroy(io_page_cachep);
 }
 
+void ext4_ioend_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = to_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+}
+
+static void put_io_page(struct ext4_io_page *io_page)
+{
+	if (atomic_dec_and_test(&io_page->p_count)) {
+		end_page_writeback(io_page->p_page);
+		put_page(io_page->p_page);
+		kmem_cache_free(io_page_cachep, io_page);
+	}
+}
+
 void ext4_free_io_end(ext4_io_end_t *io)
 {
 	int i;
+	wait_queue_head_t *wq;
 
 	BUG_ON(!io);
 	if (io->page)
 		put_page(io->page);
-	for (i = 0; i < io->num_io_pages; i++) {
-		if (--io->pages[i]->p_count == 0) {
-			struct page *page = io->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io->pages[i]);
-		}
-	}
+	for (i = 0; i < io->num_io_pages; i++)
+		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
-	iput(io->inode);
+	wq = to_ioend_wq(io->inode);
+	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
+	    waitqueue_active(wq))
+		wake_up_all(wq);
 	kmem_cache_free(io_end_cachep, io);
 }
 
@@ -142,8 +163,8 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 	io = kmem_cache_alloc(io_end_cachep, flags);
 	if (io) {
 		memset(io, 0, sizeof(*io));
-		io->inode = igrab(inode);
-		BUG_ON(!io->inode);
+		atomic_inc(&EXT4_I(inode)->i_ioend_count);
+		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
 		INIT_LIST_HEAD(&io->list);
 	}
@@ -171,35 +192,15 @@ static void ext4_end_bio(struct bio *bio, int error)
 	struct workqueue_struct *wq;
 	struct inode *inode;
 	unsigned long flags;
-	ext4_fsblk_t err_block;
 	int i;
 
 	BUG_ON(!io_end);
-	inode = io_end->inode;
 	bio->bi_private = NULL;
 	bio->bi_end_io = NULL;
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = 0;
-	err_block = bio->bi_sector >> (inode->i_blkbits - 9);
 	bio_put(bio);
 
-	if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
-		pr_err("sb umounted, discard end_io request for inode %lu\n",
-			io_end->inode->i_ino);
-		ext4_free_io_end(io_end);
-		return;
-	}
-
-	if (error) {
-		io_end->flag |= EXT4_IO_END_ERROR;
-		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
-			     "(offset %llu size %ld starting block %llu)",
-			     inode->i_ino,
-			     (unsigned long long) io_end->offset,
-			     (long) io_end->size,
-			     (unsigned long long) err_block);
-	}
-
 	for (i = 0; i < io_end->num_io_pages; i++) {
 		struct page *page = io_end->pages[i]->p_page;
 		struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
 			} while (bh != head);
 		}
 
-		if (--io_end->pages[i]->p_count == 0) {
-			struct page *page = io_end->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io_end->pages[i]);
-		}
+		put_io_page(io_end->pages[i]);
 
 		/*
 		 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@ static void ext4_end_bio(struct bio *bio, int error)
 		if (!partial_write)
 			SetPageUptodate(page);
 	}
-
 	io_end->num_io_pages = 0;
+	inode = io_end->inode;
+
+	if (error) {
+		io_end->flag |= EXT4_IO_END_ERROR;
+		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+			     "(offset %llu size %ld starting block %llu)",
+			     inode->i_ino,
+			     (unsigned long long) io_end->offset,
+			     (long) io_end->size,
+			     (unsigned long long)
+			     bio->bi_sector >> (inode->i_blkbits - 9));
+	}
 
 	/* Add the io_end to per-inode completed io list*/
 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@ static int io_submit_init(struct ext4_io_submit *io,
 	bio->bi_private = io->io_end = io_end;
 	bio->bi_end_io = ext4_end_bio;
 
-	io_end->inode = inode;
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
 	io->io_bio = bio;
@@ -360,7 +365,7 @@ submit_and_retry:
 	if ((io_end->num_io_pages == 0) ||
 	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
 		io_end->pages[io_end->num_io_pages++] = io_page;
-		io_page->p_count++;
+		atomic_inc(&io_page->p_count);
 	}
 	return 0;
 }
@@ -389,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		return -ENOMEM;
 	}
 	io_page->p_page = page;
-	io_page->p_count = 0;
+	atomic_set(&io_page->p_count, 1);
 	get_page(page);
 
 	for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	 * PageWriteback bit from the page to prevent the system from
 	 * wedging later on.
 	 */
-	if (io_page->p_count == 0) {
-		put_page(page);
-		end_page_writeback(page);
-		kmem_cache_free(io_page_cachep, io_page);
-	}
+	put_io_page(io_page);
 	return ret;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 40131b777af6..61182fe6254e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -828,12 +828,22 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 	ei->cur_aio_dio = NULL;
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
+	atomic_set(&ei->i_ioend_count, 0);
 
 	return &ei->vfs_inode;
 }
 
+static int ext4_drop_inode(struct inode *inode)
+{
+	int drop = generic_drop_inode(inode);
+
+	trace_ext4_drop_inode(inode, drop);
+	return drop;
+}
+
 static void ext4_destroy_inode(struct inode *inode)
 {
+	ext4_ioend_wait(inode);
 	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
 		ext4_msg(inode->i_sb, KERN_ERR,
 			 "Inode %lu (%p): orphan list check failed!",
@@ -1173,6 +1183,7 @@ static const struct super_operations ext4_sops = {
 	.destroy_inode	= ext4_destroy_inode,
 	.write_inode	= ext4_write_inode,
 	.dirty_inode	= ext4_dirty_inode,
+	.drop_inode	= ext4_drop_inode,
 	.evict_inode	= ext4_evict_inode,
 	.put_super	= ext4_put_super,
 	.sync_fs	= ext4_sync_fs,
@@ -1194,6 +1205,7 @@ static const struct super_operations ext4_nojournal_sops = {
 	.destroy_inode	= ext4_destroy_inode,
 	.write_inode	= ext4_write_inode,
 	.dirty_inode	= ext4_dirty_inode,
+	.drop_inode	= ext4_drop_inode,
 	.evict_inode	= ext4_evict_inode,
 	.write_super	= ext4_write_super,
 	.put_super	= ext4_put_super,
@@ -2699,7 +2711,6 @@ static int ext4_lazyinit_thread(void *arg)
 	struct ext4_li_request *elr;
 	unsigned long next_wakeup;
 	DEFINE_WAIT(wait);
-	int ret;
 
 	BUG_ON(NULL == eli);
 
@@ -2723,13 +2734,12 @@ cont_thread:
 			elr = list_entry(pos, struct ext4_li_request,
 					 lr_request);
 
-			if (time_after_eq(jiffies, elr->lr_next_sched))
-				ret = ext4_run_li_request(elr);
-
-			if (ret) {
-				ret = 0;
-				ext4_remove_li_request(elr);
-				continue;
+			if (time_after_eq(jiffies, elr->lr_next_sched)) {
+				if (ext4_run_li_request(elr) != 0) {
+					/* error, remove the lazy_init job */
+					ext4_remove_li_request(elr);
+					continue;
+				}
 			}
 
 			if (time_before(elr->lr_next_sched, next_wakeup))
@@ -2740,7 +2750,8 @@ cont_thread:
 		if (freezing(current))
 			refrigerator();
 
-		if (time_after_eq(jiffies, next_wakeup)) {
+		if ((time_after_eq(jiffies, next_wakeup)) ||
+		    (MAX_JIFFY_OFFSET == next_wakeup)) {
 			cond_resched();
 			continue;
 		}
@@ -3348,6 +3359,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
+	err = percpu_counter_init(&sbi->s_freeblocks_counter,
+				  ext4_count_free_blocks(sb));
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_freeinodes_counter,
+					  ext4_count_free_inodes(sb));
+	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+					  ext4_count_dirs(sb));
+	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
+	}
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount3;
+	}
+
 	sbi->s_stripe = ext4_get_stripe_size(sbi);
 	sbi->s_max_writeback_mb_bump = 128;
 
@@ -3446,22 +3475,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
-no_journal:
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-				  ext4_count_free_blocks(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-					  ext4_count_free_inodes(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-					  ext4_count_dirs(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-	if (err) {
-		ext4_msg(sb, KERN_ERR, "insufficient memory");
-		goto failed_mount_wq;
-	}
+	/*
+	 * The journal may have updated the bg summary counts, so we
+	 * need to update the global counters.
+	 */
+	percpu_counter_set(&sbi->s_freeblocks_counter,
+			   ext4_count_free_blocks(sb));
+	percpu_counter_set(&sbi->s_freeinodes_counter,
+			   ext4_count_free_inodes(sb));
+	percpu_counter_set(&sbi->s_dirs_counter,
+			   ext4_count_dirs(sb));
+	percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
 
+no_journal:
 	EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
 	if (!EXT4_SB(sb)->dio_unwritten_wq) {
 		printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3611,10 +3637,6 @@ failed_mount_wq:
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
 	}
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount3:
 	if (sbi->s_flex_groups) {
 		if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3622,6 +3644,10 @@ failed_mount3:
 		else
 			kfree(sbi->s_flex_groups);
 	}
+	percpu_counter_destroy(&sbi->s_freeblocks_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
@@ -3949,13 +3975,11 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
-		ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
-			&EXT4_SB(sb)->s_freeblocks_counter));
-	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
-		es->s_free_inodes_count =
-			cpu_to_le32(percpu_counter_sum_positive(
-				&EXT4_SB(sb)->s_freeinodes_counter));
+	ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+					&EXT4_SB(sb)->s_freeblocks_counter));
+	es->s_free_inodes_count =
+		cpu_to_le32(percpu_counter_sum_positive(
+				&EXT4_SB(sb)->s_freeinodes_counter));
 	sb->s_dirt = 0;
 	BUFFER_TRACE(sbh, "marking dirty");
 	mark_buffer_dirty(sbh);
@@ -4556,12 +4580,10 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
-	/* Force all delayed allocation blocks to be allocated */
-	if (test_opt(sb, DELALLOC)) {
-		down_read(&sb->s_umount);
+	/* Force all delayed allocation blocks to be allocated.
+	 * Caller already holds s_umount sem */
+	if (test_opt(sb, DELALLOC))
 		sync_filesystem(sb);
-		up_read(&sb->s_umount);
-	}
 
 	return dquot_quota_off(sb, type);
 }
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 289010d3270b..e5e345fb2a5c 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -98,6 +98,103 @@ TRACE_EVENT(ext4_allocate_inode,
 		  (unsigned long) __entry->dir, __entry->mode)
 );
 
+TRACE_EVENT(ext4_evict_inode,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major	)
+		__field(	int,	dev_minor	)
+		__field(	ino_t,	ino		)
+		__field(	int,	nlink		)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->nlink	= inode->i_nlink;
+	),
+
+	TP_printk("dev %d,%d ino %lu nlink %d",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, __entry->nlink)
+);
+
+TRACE_EVENT(ext4_drop_inode,
+	TP_PROTO(struct inode *inode, int drop),
+
+	TP_ARGS(inode, drop),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major	)
+		__field(	int,	dev_minor	)
+		__field(	ino_t,	ino		)
+		__field(	int,	drop		)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->drop	= drop;
+	),
+
+	TP_printk("dev %d,%d ino %lu drop %d",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, __entry->drop)
+);
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+	TP_PROTO(struct inode *inode, unsigned long IP),
+
+	TP_ARGS(inode, IP),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major	)
+		__field(	int,	dev_minor	)
+		__field(	ino_t,	ino		)
+		__field(unsigned long,	ip		)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->ip	= IP;
+	),
+
+	TP_printk("dev %d,%d ino %lu caller %pF",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+	TP_PROTO(struct inode *inode, loff_t new_size),
+
+	TP_ARGS(inode, new_size),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major	)
+		__field(	int,	dev_minor	)
+		__field(	ino_t,	ino		)
+		__field(	loff_t,	new_size	)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major	= MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor	= MINOR(inode->i_sb->s_dev);
+		__entry->ino		= inode->i_ino;
+		__entry->new_size	= new_size;
+	),
+
+	TP_printk("dev %d,%d ino %lu new_size %lld",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino,
+		  (long long) __entry->new_size)
+);
+
 DECLARE_EVENT_CLASS(ext4__write_begin,
 
 	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,