diff options
author | Jan Kara <jack@suse.cz> | 2013-06-04 14:21:02 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2013-06-04 14:21:02 -0400 |
commit | 2e8fa54e3b48e4ce8c4e9ca4674ffbc973f58be5 (patch) | |
tree | ef95b6ad8bac51264484db5c37db66b8047b8bd7 /fs/ext4/page-io.c | |
parent | 6b523df4fb5ae281ddbc817f40504b33e6226554 (diff) |
ext4: split extent conversion lists to reserved & unreserved parts
Now that we have extent conversions with a reserved transaction, we have
to prevent extent conversions without a reserved transaction (from the
DIO code) from blocking them (as that would effectively void any
transaction reservation we did). So split the lists, work items, and
work queues into reserved and unreserved parts.
Reviewed-by: Zheng Liu <wenqing.lz@taobao.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/page-io.c')
-rw-r--r-- | fs/ext4/page-io.c | 65 |
1 file changed, 42 insertions, 23 deletions
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 5f20bc481041..bcdfd6bdde06 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -58,8 +58,10 @@ void ext4_ioend_shutdown(struct inode *inode) | |||
58 | * We need to make sure the work structure is finished being | 58 | * We need to make sure the work structure is finished being |
59 | * used before we let the inode get destroyed. | 59 | * used before we let the inode get destroyed. |
60 | */ | 60 | */ |
61 | if (work_pending(&EXT4_I(inode)->i_unwritten_work)) | 61 | if (work_pending(&EXT4_I(inode)->i_rsv_conversion_work)) |
62 | cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); | 62 | cancel_work_sync(&EXT4_I(inode)->i_rsv_conversion_work); |
63 | if (work_pending(&EXT4_I(inode)->i_unrsv_conversion_work)) | ||
64 | cancel_work_sync(&EXT4_I(inode)->i_unrsv_conversion_work); | ||
63 | } | 65 | } |
64 | 66 | ||
65 | static void ext4_release_io_end(ext4_io_end_t *io_end) | 67 | static void ext4_release_io_end(ext4_io_end_t *io_end) |
@@ -114,20 +116,17 @@ static int ext4_end_io(ext4_io_end_t *io) | |||
114 | return ret; | 116 | return ret; |
115 | } | 117 | } |
116 | 118 | ||
117 | static void dump_completed_IO(struct inode *inode) | 119 | static void dump_completed_IO(struct inode *inode, struct list_head *head) |
118 | { | 120 | { |
119 | #ifdef EXT4FS_DEBUG | 121 | #ifdef EXT4FS_DEBUG |
120 | struct list_head *cur, *before, *after; | 122 | struct list_head *cur, *before, *after; |
121 | ext4_io_end_t *io, *io0, *io1; | 123 | ext4_io_end_t *io, *io0, *io1; |
122 | 124 | ||
123 | if (list_empty(&EXT4_I(inode)->i_completed_io_list)) { | 125 | if (list_empty(head)) |
124 | ext4_debug("inode %lu completed_io list is empty\n", | ||
125 | inode->i_ino); | ||
126 | return; | 126 | return; |
127 | } | ||
128 | 127 | ||
129 | ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino); | 128 | ext4_debug("Dump inode %lu completed io list\n", inode->i_ino); |
130 | list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) { | 129 | list_for_each_entry(io, head, list) { |
131 | cur = &io->list; | 130 | cur = &io->list; |
132 | before = cur->prev; | 131 | before = cur->prev; |
133 | io0 = container_of(before, ext4_io_end_t, list); | 132 | io0 = container_of(before, ext4_io_end_t, list); |
@@ -148,16 +147,23 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end) | |||
148 | unsigned long flags; | 147 | unsigned long flags; |
149 | 148 | ||
150 | BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); | 149 | BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN)); |
151 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; | ||
152 | |||
153 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 150 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
154 | if (list_empty(&ei->i_completed_io_list)) | 151 | if (io_end->handle) { |
155 | queue_work(wq, &ei->i_unwritten_work); | 152 | wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq; |
156 | list_add_tail(&io_end->list, &ei->i_completed_io_list); | 153 | if (list_empty(&ei->i_rsv_conversion_list)) |
154 | queue_work(wq, &ei->i_rsv_conversion_work); | ||
155 | list_add_tail(&io_end->list, &ei->i_rsv_conversion_list); | ||
156 | } else { | ||
157 | wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq; | ||
158 | if (list_empty(&ei->i_unrsv_conversion_list)) | ||
159 | queue_work(wq, &ei->i_unrsv_conversion_work); | ||
160 | list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list); | ||
161 | } | ||
157 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 162 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
158 | } | 163 | } |
159 | 164 | ||
160 | static int ext4_do_flush_completed_IO(struct inode *inode) | 165 | static int ext4_do_flush_completed_IO(struct inode *inode, |
166 | struct list_head *head) | ||
161 | { | 167 | { |
162 | ext4_io_end_t *io; | 168 | ext4_io_end_t *io; |
163 | struct list_head unwritten; | 169 | struct list_head unwritten; |
@@ -166,8 +172,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode) | |||
166 | int err, ret = 0; | 172 | int err, ret = 0; |
167 | 173 | ||
168 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 174 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
169 | dump_completed_IO(inode); | 175 | dump_completed_IO(inode, head); |
170 | list_replace_init(&ei->i_completed_io_list, &unwritten); | 176 | list_replace_init(head, &unwritten); |
171 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 177 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
172 | 178 | ||
173 | while (!list_empty(&unwritten)) { | 179 | while (!list_empty(&unwritten)) { |
@@ -183,21 +189,34 @@ static int ext4_do_flush_completed_IO(struct inode *inode) | |||
183 | } | 189 | } |
184 | 190 | ||
185 | /* | 191 | /* |
186 | * work on completed aio dio IO, to convert unwritten extents to extents | 192 | * work on completed IO, to convert unwritten extents to extents |
187 | */ | 193 | */ |
188 | void ext4_end_io_work(struct work_struct *work) | 194 | void ext4_end_io_rsv_work(struct work_struct *work) |
189 | { | 195 | { |
190 | struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, | 196 | struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, |
191 | i_unwritten_work); | 197 | i_rsv_conversion_work); |
192 | ext4_do_flush_completed_IO(&ei->vfs_inode); | 198 | ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list); |
199 | } | ||
200 | |||
201 | void ext4_end_io_unrsv_work(struct work_struct *work) | ||
202 | { | ||
203 | struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info, | ||
204 | i_unrsv_conversion_work); | ||
205 | ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list); | ||
193 | } | 206 | } |
194 | 207 | ||
195 | int ext4_flush_unwritten_io(struct inode *inode) | 208 | int ext4_flush_unwritten_io(struct inode *inode) |
196 | { | 209 | { |
197 | int ret; | 210 | int ret, err; |
211 | |||
198 | WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) && | 212 | WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) && |
199 | !(inode->i_state & I_FREEING)); | 213 | !(inode->i_state & I_FREEING)); |
200 | ret = ext4_do_flush_completed_IO(inode); | 214 | ret = ext4_do_flush_completed_IO(inode, |
215 | &EXT4_I(inode)->i_rsv_conversion_list); | ||
216 | err = ext4_do_flush_completed_IO(inode, | ||
217 | &EXT4_I(inode)->i_unrsv_conversion_list); | ||
218 | if (!ret) | ||
219 | ret = err; | ||
201 | ext4_unwritten_wait(inode); | 220 | ext4_unwritten_wait(inode); |
202 | return ret; | 221 | return ret; |
203 | } | 222 | } |