diff options
author | Christoph Hellwig <hch@infradead.org> | 2013-09-04 09:04:39 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2013-09-04 09:23:46 -0400 |
commit | 7b7a8665edd8db733980389b098530f9e4f630b2 (patch) | |
tree | 968d570a9f0c4d861226aefed2f5f97a131c8d53 /fs/ext4 | |
parent | 4b6ccca701ef5977d0ffbc2c932430dea88b38b6 (diff) |
direct-io: Implement generic deferred AIO completions
Add support to the core direct-io code to defer AIO completions to user
context using a workqueue. This replaces open-coded and less efficient
code in XFS and ext4 (we save a memory allocation for each direct IO)
and will be needed to properly support O_(D)SYNC for AIO.
The communication between the filesystem and the direct I/O code requires
a new buffer head flag, which is a bit ugly but not avoidable until the
direct I/O code stops abusing the buffer_head structure for communicating
with the filesystems.
Currently this creates a per-superblock unbound workqueue for these
completions, which is taken from an earlier patch by Jan Kara. I'm
not really convinced about this use and would prefer a "normal" global
workqueue with a high concurrency limit, but this needs further discussion.
JK: Fixed ext4 part, dynamic allocation of the workqueue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/ext4')
-rw-r--r-- | fs/ext4/ext4.h | 11 | ||||
-rw-r--r-- | fs/ext4/inode.c | 28 | ||||
-rw-r--r-- | fs/ext4/page-io.c | 30 | ||||
-rw-r--r-- | fs/ext4/super.c | 16 |
4 files changed, 14 insertions, 71 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0ab26fbf3380..b247fbbed99c 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -180,7 +180,6 @@ struct ext4_map_blocks { | |||
180 | * Flags for ext4_io_end->flags | 180 | * Flags for ext4_io_end->flags |
181 | */ | 181 | */ |
182 | #define EXT4_IO_END_UNWRITTEN 0x0001 | 182 | #define EXT4_IO_END_UNWRITTEN 0x0001 |
183 | #define EXT4_IO_END_DIRECT 0x0002 | ||
184 | 183 | ||
185 | /* | 184 | /* |
186 | * For converting uninitialized extents on a work queue. 'handle' is used for | 185 | * For converting uninitialized extents on a work queue. 'handle' is used for |
@@ -196,8 +195,6 @@ typedef struct ext4_io_end { | |||
196 | unsigned int flag; /* unwritten or not */ | 195 | unsigned int flag; /* unwritten or not */ |
197 | loff_t offset; /* offset in the file */ | 196 | loff_t offset; /* offset in the file */ |
198 | ssize_t size; /* size of the extent */ | 197 | ssize_t size; /* size of the extent */ |
199 | struct kiocb *iocb; /* iocb struct for AIO */ | ||
200 | int result; /* error value for AIO */ | ||
201 | atomic_t count; /* reference counter */ | 198 | atomic_t count; /* reference counter */ |
202 | } ext4_io_end_t; | 199 | } ext4_io_end_t; |
203 | 200 | ||
@@ -900,11 +897,9 @@ struct ext4_inode_info { | |||
900 | * Completed IOs that need unwritten extents handling and don't have | 897 | * Completed IOs that need unwritten extents handling and don't have |
901 | * transaction reserved | 898 | * transaction reserved |
902 | */ | 899 | */ |
903 | struct list_head i_unrsv_conversion_list; | ||
904 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ | 900 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ |
905 | atomic_t i_unwritten; /* Nr. of inflight conversions pending */ | 901 | atomic_t i_unwritten; /* Nr. of inflight conversions pending */ |
906 | struct work_struct i_rsv_conversion_work; | 902 | struct work_struct i_rsv_conversion_work; |
907 | struct work_struct i_unrsv_conversion_work; | ||
908 | 903 | ||
909 | spinlock_t i_block_reservation_lock; | 904 | spinlock_t i_block_reservation_lock; |
910 | 905 | ||
@@ -1276,8 +1271,6 @@ struct ext4_sb_info { | |||
1276 | struct flex_groups *s_flex_groups; | 1271 | struct flex_groups *s_flex_groups; |
1277 | ext4_group_t s_flex_groups_allocated; | 1272 | ext4_group_t s_flex_groups_allocated; |
1278 | 1273 | ||
1279 | /* workqueue for unreserved extent convertions (dio) */ | ||
1280 | struct workqueue_struct *unrsv_conversion_wq; | ||
1281 | /* workqueue for reserved extent conversions (buffered io) */ | 1274 | /* workqueue for reserved extent conversions (buffered io) */ |
1282 | struct workqueue_struct *rsv_conversion_wq; | 1275 | struct workqueue_struct *rsv_conversion_wq; |
1283 | 1276 | ||
@@ -1340,9 +1333,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode, | |||
1340 | struct ext4_io_end *io_end) | 1333 | struct ext4_io_end *io_end) |
1341 | { | 1334 | { |
1342 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { | 1335 | if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { |
1343 | /* Writeback has to have coversion transaction reserved */ | ||
1344 | WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle && | ||
1345 | !(io_end->flag & EXT4_IO_END_DIRECT)); | ||
1346 | io_end->flag |= EXT4_IO_END_UNWRITTEN; | 1336 | io_end->flag |= EXT4_IO_END_UNWRITTEN; |
1347 | atomic_inc(&EXT4_I(inode)->i_unwritten); | 1337 | atomic_inc(&EXT4_I(inode)->i_unwritten); |
1348 | } | 1338 | } |
@@ -2716,7 +2706,6 @@ extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); | |||
2716 | extern void ext4_io_submit_init(struct ext4_io_submit *io, | 2706 | extern void ext4_io_submit_init(struct ext4_io_submit *io, |
2717 | struct writeback_control *wbc); | 2707 | struct writeback_control *wbc); |
2718 | extern void ext4_end_io_rsv_work(struct work_struct *work); | 2708 | extern void ext4_end_io_rsv_work(struct work_struct *work); |
2719 | extern void ext4_end_io_unrsv_work(struct work_struct *work); | ||
2720 | extern void ext4_io_submit(struct ext4_io_submit *io); | 2709 | extern void ext4_io_submit(struct ext4_io_submit *io); |
2721 | extern int ext4_bio_write_page(struct ext4_io_submit *io, | 2710 | extern int ext4_bio_write_page(struct ext4_io_submit *io, |
2722 | struct page *page, | 2711 | struct page *page, |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c2ca04e67a4f..123bd81692d1 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -727,8 +727,12 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock, | |||
727 | 727 | ||
728 | ret = ext4_map_blocks(handle, inode, &map, flags); | 728 | ret = ext4_map_blocks(handle, inode, &map, flags); |
729 | if (ret > 0) { | 729 | if (ret > 0) { |
730 | ext4_io_end_t *io_end = ext4_inode_aio(inode); | ||
731 | |||
730 | map_bh(bh, inode->i_sb, map.m_pblk); | 732 | map_bh(bh, inode->i_sb, map.m_pblk); |
731 | bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; | 733 | bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; |
734 | if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) | ||
735 | set_buffer_defer_completion(bh); | ||
732 | bh->b_size = inode->i_sb->s_blocksize * map.m_len; | 736 | bh->b_size = inode->i_sb->s_blocksize * map.m_len; |
733 | ret = 0; | 737 | ret = 0; |
734 | } | 738 | } |
@@ -2991,19 +2995,13 @@ static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, | |||
2991 | } | 2995 | } |
2992 | 2996 | ||
2993 | static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | 2997 | static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, |
2994 | ssize_t size, void *private, int ret, | 2998 | ssize_t size, void *private) |
2995 | bool is_async) | ||
2996 | { | 2999 | { |
2997 | struct inode *inode = file_inode(iocb->ki_filp); | ||
2998 | ext4_io_end_t *io_end = iocb->private; | 3000 | ext4_io_end_t *io_end = iocb->private; |
2999 | 3001 | ||
3000 | /* if not async direct IO just return */ | 3002 | /* if not async direct IO just return */ |
3001 | if (!io_end) { | 3003 | if (!io_end) |
3002 | inode_dio_done(inode); | ||
3003 | if (is_async) | ||
3004 | aio_complete(iocb, ret, 0); | ||
3005 | return; | 3004 | return; |
3006 | } | ||
3007 | 3005 | ||
3008 | ext_debug("ext4_end_io_dio(): io_end 0x%p " | 3006 | ext_debug("ext4_end_io_dio(): io_end 0x%p " |
3009 | "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", | 3007 | "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", |
@@ -3013,11 +3011,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3013 | iocb->private = NULL; | 3011 | iocb->private = NULL; |
3014 | io_end->offset = offset; | 3012 | io_end->offset = offset; |
3015 | io_end->size = size; | 3013 | io_end->size = size; |
3016 | if (is_async) { | 3014 | ext4_put_io_end(io_end); |
3017 | io_end->iocb = iocb; | ||
3018 | io_end->result = ret; | ||
3019 | } | ||
3020 | ext4_put_io_end_defer(io_end); | ||
3021 | } | 3015 | } |
3022 | 3016 | ||
3023 | /* | 3017 | /* |
@@ -3102,7 +3096,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3102 | ret = -ENOMEM; | 3096 | ret = -ENOMEM; |
3103 | goto retake_lock; | 3097 | goto retake_lock; |
3104 | } | 3098 | } |
3105 | io_end->flag |= EXT4_IO_END_DIRECT; | ||
3106 | /* | 3099 | /* |
3107 | * Grab reference for DIO. Will be dropped in ext4_end_io_dio() | 3100 | * Grab reference for DIO. Will be dropped in ext4_end_io_dio() |
3108 | */ | 3101 | */ |
@@ -3147,13 +3140,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3147 | if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { | 3140 | if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { |
3148 | WARN_ON(iocb->private != io_end); | 3141 | WARN_ON(iocb->private != io_end); |
3149 | WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); | 3142 | WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); |
3150 | WARN_ON(io_end->iocb); | ||
3151 | /* | ||
3152 | * Generic code already did inode_dio_done() so we | ||
3153 | * have to clear EXT4_IO_END_DIRECT to not do it for | ||
3154 | * the second time. | ||
3155 | */ | ||
3156 | io_end->flag = 0; | ||
3157 | ext4_put_io_end(io_end); | 3143 | ext4_put_io_end(io_end); |
3158 | iocb->private = NULL; | 3144 | iocb->private = NULL; |
3159 | } | 3145 | } |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 6625d210fb45..d7d0c7b46ed4 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -123,10 +123,6 @@ static void ext4_release_io_end(ext4_io_end_t *io_end) | |||
123 | ext4_finish_bio(bio); | 123 | ext4_finish_bio(bio); |
124 | bio_put(bio); | 124 | bio_put(bio); |
125 | } | 125 | } |
126 | if (io_end->flag & EXT4_IO_END_DIRECT) | ||
127 | inode_dio_done(io_end->inode); | ||
128 | if (io_end->iocb) | ||
129 | aio_complete(io_end->iocb, io_end->result, 0); | ||
130 | kmem_cache_free(io_end_cachep, io_end); | 126 | kmem_cache_free(io_end_cachep, io_end); |
131 | } | 127 | } |
132 | 128 | ||
@@ -204,19 +200,14 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end) | |||
204 | struct workqueue_struct *wq; | 200 | struct workqueue_struct *wq; |
205 | unsigned long flags; | 201 | unsigned long flags; |
206 | 202 | ||
207 | BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); | 203 | /* Only reserved conversions from writeback should enter here */ |
204 | WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); | ||
205 | WARN_ON(!io_end->handle); | ||
208 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 206 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
209 | if (io_end->handle) { | 207 | wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq; |
210 | wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq; | 208 | if (list_empty(&ei->i_rsv_conversion_list)) |
211 | if (list_empty(&ei->i_rsv_conversion_list)) | 209 | queue_work(wq, &ei->i_rsv_conversion_work); |
212 | queue_work(wq, &ei->i_rsv_conversion_work); | 210 | list_add_tail(&io_end->list, &ei->i_rsv_conversion_list); |
213 | list_add_tail(&io_end->list, &ei->i_rsv_conversion_list); | ||
214 | } else { | ||
215 | wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq; | ||
216 | if (list_empty(&ei->i_unrsv_conversion_list)) | ||
217 | queue_work(wq, &ei->i_unrsv_conversion_work); | ||
218 | list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list); | ||
219 | } | ||
220 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 211 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
221 | } | 212 | } |
222 | 213 | ||
@@ -256,13 +247,6 @@ void ext4_end_io_rsv_work(struct work_struct *work) | |||
256 | ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list); | 247 | ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list); |
257 | } | 248 | } |
258 | 249 | ||
259 | void ext4_end_io_unrsv_work(struct work_struct *work) | ||
260 | { | ||
261 | struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, | ||
262 | i_unrsv_conversion_work); | ||
263 | ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list); | ||
264 | } | ||
265 | |||
266 | ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) | 250 | ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) |
267 | { | 251 | { |
268 | ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags); | 252 | ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b59373b625e9..5db4f0df8174 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -762,9 +762,7 @@ static void ext4_put_super(struct super_block *sb) | |||
762 | ext4_unregister_li_request(sb); | 762 | ext4_unregister_li_request(sb); |
763 | dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); | 763 | dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); |
764 | 764 | ||
765 | flush_workqueue(sbi->unrsv_conversion_wq); | ||
766 | flush_workqueue(sbi->rsv_conversion_wq); | 765 | flush_workqueue(sbi->rsv_conversion_wq); |
767 | destroy_workqueue(sbi->unrsv_conversion_wq); | ||
768 | destroy_workqueue(sbi->rsv_conversion_wq); | 766 | destroy_workqueue(sbi->rsv_conversion_wq); |
769 | 767 | ||
770 | if (sbi->s_journal) { | 768 | if (sbi->s_journal) { |
@@ -875,14 +873,12 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
875 | #endif | 873 | #endif |
876 | ei->jinode = NULL; | 874 | ei->jinode = NULL; |
877 | INIT_LIST_HEAD(&ei->i_rsv_conversion_list); | 875 | INIT_LIST_HEAD(&ei->i_rsv_conversion_list); |
878 | INIT_LIST_HEAD(&ei->i_unrsv_conversion_list); | ||
879 | spin_lock_init(&ei->i_completed_io_lock); | 876 | spin_lock_init(&ei->i_completed_io_lock); |
880 | ei->i_sync_tid = 0; | 877 | ei->i_sync_tid = 0; |
881 | ei->i_datasync_tid = 0; | 878 | ei->i_datasync_tid = 0; |
882 | atomic_set(&ei->i_ioend_count, 0); | 879 | atomic_set(&ei->i_ioend_count, 0); |
883 | atomic_set(&ei->i_unwritten, 0); | 880 | atomic_set(&ei->i_unwritten, 0); |
884 | INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); | 881 | INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); |
885 | INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work); | ||
886 | 882 | ||
887 | return &ei->vfs_inode; | 883 | return &ei->vfs_inode; |
888 | } | 884 | } |
@@ -3954,14 +3950,6 @@ no_journal: | |||
3954 | goto failed_mount4; | 3950 | goto failed_mount4; |
3955 | } | 3951 | } |
3956 | 3952 | ||
3957 | EXT4_SB(sb)->unrsv_conversion_wq = | ||
3958 | alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); | ||
3959 | if (!EXT4_SB(sb)->unrsv_conversion_wq) { | ||
3960 | printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); | ||
3961 | ret = -ENOMEM; | ||
3962 | goto failed_mount4; | ||
3963 | } | ||
3964 | |||
3965 | /* | 3953 | /* |
3966 | * The jbd2_journal_load will have done any necessary log recovery, | 3954 | * The jbd2_journal_load will have done any necessary log recovery, |
3967 | * so we can safely mount the rest of the filesystem now. | 3955 | * so we can safely mount the rest of the filesystem now. |
@@ -4115,8 +4103,6 @@ failed_mount4: | |||
4115 | ext4_msg(sb, KERN_ERR, "mount failed"); | 4103 | ext4_msg(sb, KERN_ERR, "mount failed"); |
4116 | if (EXT4_SB(sb)->rsv_conversion_wq) | 4104 | if (EXT4_SB(sb)->rsv_conversion_wq) |
4117 | destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); | 4105 | destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); |
4118 | if (EXT4_SB(sb)->unrsv_conversion_wq) | ||
4119 | destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); | ||
4120 | failed_mount_wq: | 4106 | failed_mount_wq: |
4121 | if (sbi->s_journal) { | 4107 | if (sbi->s_journal) { |
4122 | jbd2_journal_destroy(sbi->s_journal); | 4108 | jbd2_journal_destroy(sbi->s_journal); |
@@ -4564,7 +4550,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait) | |||
4564 | 4550 | ||
4565 | trace_ext4_sync_fs(sb, wait); | 4551 | trace_ext4_sync_fs(sb, wait); |
4566 | flush_workqueue(sbi->rsv_conversion_wq); | 4552 | flush_workqueue(sbi->rsv_conversion_wq); |
4567 | flush_workqueue(sbi->unrsv_conversion_wq); | ||
4568 | /* | 4553 | /* |
4569 | * Writeback quota in non-journalled quota case - journalled quota has | 4554 | * Writeback quota in non-journalled quota case - journalled quota has |
4570 | * no dirty dquots | 4555 | * no dirty dquots |
@@ -4600,7 +4585,6 @@ static int ext4_sync_fs_nojournal(struct super_block *sb, int wait) | |||
4600 | 4585 | ||
4601 | trace_ext4_sync_fs(sb, wait); | 4586 | trace_ext4_sync_fs(sb, wait); |
4602 | flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq); | 4587 | flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq); |
4603 | flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq); | ||
4604 | dquot_writeback_dquots(sb, -1); | 4588 | dquot_writeback_dquots(sb, -1); |
4605 | if (wait && test_opt(sb, BARRIER)) | 4589 | if (wait && test_opt(sb, BARRIER)) |
4606 | ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); | 4590 | ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); |