about summary refs log tree commit diff stats
path: root/fs/ext4
diff options
context:
space:
mode:
author    Tao Ma <boyu.mt@taobao.com>   2011-10-30 18:26:08 -0400
committer Theodore Ts'o <tytso@mit.edu> 2011-10-30 18:26:08 -0400
commit    d73d5046a72467d4510825b99e2269e09ad80e15 (patch)
tree      4ac8b5729e1aeffe8348557a7f77da1791c268cc /fs/ext4
parent    6d6a435190bdf2e04c9465cde5bdc3ac68cf11a4 (diff)
ext4: Use correct locking for ext4_end_io_nolock()
We must hold i_completed_io_lock when manipulating anything on the i_completed_io_list linked list. This includes io->lock, which we were checking in ext4_end_io_nolock(). So move this check to ext4_end_io_work(). This also has the bonus of avoiding extra work if it is already done without needing to take the mutex. Signed-off-by: Tao Ma <boyu.mt@taobao.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--  fs/ext4/fsync.c    3
-rw-r--r--  fs/ext4/page-io.c  14
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c942924a0645..851ac5b3cec9 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -83,9 +83,6 @@ int ext4_flush_completed_IO(struct inode *inode)
 	int ret = 0;
 	int ret2 = 0;
 
-	if (list_empty(&ei->i_completed_io_list))
-		return ret;
-
 	dump_completed_IO(inode);
 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
 	while (!list_empty(&ei->i_completed_io_list)){
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 92f38ee13f8a..aed40966f342 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -87,6 +87,9 @@ void ext4_free_io_end(ext4_io_end_t *io)
 
 /*
  * check a range of space and convert unwritten extents to written.
+ *
+ * Called with inode->i_mutex; we depend on this when we manipulate
+ * io->flag, since we could otherwise race with ext4_flush_completed_IO()
  */
 int ext4_end_io_nolock(ext4_io_end_t *io)
 {
@@ -100,9 +103,6 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
 		      "list->prev 0x%p\n",
 		      io, inode->i_ino, io->list.next, io->list.prev);
 
-	if (list_empty(&io->list))
-		return ret;
-
 	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
 		return ret;
 
@@ -142,6 +142,13 @@ static void ext4_end_io_work(struct work_struct *work)
 	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	if (list_empty(&io->list)) {
+		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+		goto free;
+	}
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
 	if (!mutex_trylock(&inode->i_mutex)) {
 		/*
 		 * Requeue the work instead of waiting so that the work
@@ -170,6 +177,7 @@ static void ext4_end_io_work(struct work_struct *work)
 	list_del_init(&io->list);
 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 	mutex_unlock(&inode->i_mutex);
+free:
 	ext4_free_io_end(io);
 }
 