aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--	fs/ext4/ext4.h	1
-rw-r--r--	fs/ext4/fsync.c	2
-rw-r--r--	fs/ext4/page-io.c	9
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6481e3ca3528..37e7d8b66c99 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -185,6 +185,7 @@ struct mpage_da_data {
 #define EXT4_IO_END_ERROR	0x0002
 #define EXT4_IO_END_QUEUED	0x0004
 #define EXT4_IO_END_DIRECT	0x0008
+#define EXT4_IO_END_IN_FSYNC	0x0010
 
 struct ext4_io_page {
 	struct page *p_page;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 00a2cb753efd..bb6c7d811313 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -89,6 +89,7 @@ int ext4_flush_completed_IO(struct inode *inode)
 		io = list_entry(ei->i_completed_io_list.next,
 				ext4_io_end_t, list);
 		list_del_init(&io->list);
+		io->flag |= EXT4_IO_END_IN_FSYNC;
 		/*
 		 * Calling ext4_end_io_nolock() to convert completed
 		 * IO to written.
@@ -108,6 +109,7 @@ int ext4_flush_completed_IO(struct inode *inode)
 		if (ret < 0)
 			ret2 = ret;
 		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+		io->flag &= ~EXT4_IO_END_IN_FSYNC;
 	}
 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 	return (ret2 < 0) ? ret2 : 0;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 9e1b8eb1e7ac..dcdeef169a69 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -129,12 +129,18 @@ static void ext4_end_io_work(struct work_struct *work)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	if (io->flag & EXT4_IO_END_IN_FSYNC)
+		goto requeue;
 	if (list_empty(&io->list)) {
 		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 		goto free;
 	}
 
 	if (!mutex_trylock(&inode->i_mutex)) {
+		bool was_queued;
+requeue:
+		was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
+		io->flag |= EXT4_IO_END_QUEUED;
 		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 		/*
 		 * Requeue the work instead of waiting so that the work
@@ -147,9 +153,8 @@ static void ext4_end_io_work(struct work_struct *work)
 		 * yield the cpu if it sees an end_io request that has already
 		 * been requeued.
 		 */
-		if (io->flag & EXT4_IO_END_QUEUED)
+		if (was_queued)
 			yield();
-		io->flag |= EXT4_IO_END_QUEUED;
 		return;
 	}
 	list_del_init(&io->list);