aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4/page-io.c
diff options
context:
space:
mode:
author: Jan Kara <jack@suse.cz> 2013-01-28 09:43:46 -0500
committer: Theodore Ts'o <tytso@mit.edu> 2013-01-28 09:43:46 -0500
commit 84c17543ab5685d950da73209df0ecda26e72d3b (patch)
tree c7f2025fa8bcc209168d171c825060ff95a2be97 /fs/ext4/page-io.c
parent fe089c77f1466c74f0f19ad2475b1630216b8b19 (diff)
ext4: move work from io_end to inode
It does not make much sense to have struct work in ext4_io_end_t because we always use it for only one ext4_io_end_t per inode (the first one in the i_completed_io list). So just move the structure to the inode itself. This also allows for a small simplification in processing io_end structures.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/page-io.c')
-rw-r--r--fs/ext4/page-io.c33
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 05795f10e55a..a0290176ee75 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -151,16 +151,13 @@ void ext4_add_complete_io(ext4_io_end_t *io_end)
151 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; 151 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
152 152
153 spin_lock_irqsave(&ei->i_completed_io_lock, flags); 153 spin_lock_irqsave(&ei->i_completed_io_lock, flags);
154 if (list_empty(&ei->i_completed_io_list)) { 154 if (list_empty(&ei->i_completed_io_list))
155 io_end->flag |= EXT4_IO_END_QUEUED; 155 queue_work(wq, &ei->i_unwritten_work);
156 queue_work(wq, &io_end->work);
157 }
158 list_add_tail(&io_end->list, &ei->i_completed_io_list); 156 list_add_tail(&io_end->list, &ei->i_completed_io_list);
159 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 157 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
160} 158}
161 159
162static int ext4_do_flush_completed_IO(struct inode *inode, 160static int ext4_do_flush_completed_IO(struct inode *inode)
163 ext4_io_end_t *work_io)
164{ 161{
165 ext4_io_end_t *io; 162 ext4_io_end_t *io;
166 struct list_head unwritten, complete, to_free; 163 struct list_head unwritten, complete, to_free;
@@ -191,19 +188,7 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
191 while (!list_empty(&complete)) { 188 while (!list_empty(&complete)) {
192 io = list_entry(complete.next, ext4_io_end_t, list); 189 io = list_entry(complete.next, ext4_io_end_t, list);
193 io->flag &= ~EXT4_IO_END_UNWRITTEN; 190 io->flag &= ~EXT4_IO_END_UNWRITTEN;
194 /* end_io context can not be destroyed now because it still 191 list_move(&io->list, &to_free);
195 * used by queued worker. Worker thread will destroy it later */
196 if (io->flag & EXT4_IO_END_QUEUED)
197 list_del_init(&io->list);
198 else
199 list_move(&io->list, &to_free);
200 }
201 /* If we are called from worker context, it is time to clear queued
202 * flag, and destroy it's end_io if it was converted already */
203 if (work_io) {
204 work_io->flag &= ~EXT4_IO_END_QUEUED;
205 if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
206 list_add_tail(&work_io->list, &to_free);
207 } 192 }
208 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 193 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
209 194
@@ -218,10 +203,11 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
218/* 203/*
219 * work on completed aio dio IO, to convert unwritten extents to extents 204 * work on completed aio dio IO, to convert unwritten extents to extents
220 */ 205 */
221static void ext4_end_io_work(struct work_struct *work) 206void ext4_end_io_work(struct work_struct *work)
222{ 207{
223 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); 208 struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
224 ext4_do_flush_completed_IO(io->inode, io); 209 i_unwritten_work);
210 ext4_do_flush_completed_IO(&ei->vfs_inode);
225} 211}
226 212
227int ext4_flush_unwritten_io(struct inode *inode) 213int ext4_flush_unwritten_io(struct inode *inode)
@@ -229,7 +215,7 @@ int ext4_flush_unwritten_io(struct inode *inode)
229 int ret; 215 int ret;
230 WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) && 216 WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
231 !(inode->i_state & I_FREEING)); 217 !(inode->i_state & I_FREEING));
232 ret = ext4_do_flush_completed_IO(inode, NULL); 218 ret = ext4_do_flush_completed_IO(inode);
233 ext4_unwritten_wait(inode); 219 ext4_unwritten_wait(inode);
234 return ret; 220 return ret;
235} 221}
@@ -240,7 +226,6 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
240 if (io) { 226 if (io) {
241 atomic_inc(&EXT4_I(inode)->i_ioend_count); 227 atomic_inc(&EXT4_I(inode)->i_ioend_count);
242 io->inode = inode; 228 io->inode = inode;
243 INIT_WORK(&io->work, ext4_end_io_work);
244 INIT_LIST_HEAD(&io->list); 229 INIT_LIST_HEAD(&io->list);
245 } 230 }
246 return io; 231 return io;