summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Andreas Gruenbacher <agruenba@redhat.com> 2017-06-30 08:47:15 -0400
committer: Bob Peterson <rpeterso@redhat.com> 2017-07-05 08:20:24 -0400
commit4fd1a5795214bc6405f14691c1344ae8c3f17215 (patch)
treeac0d1a48b243c6e27b40460503bc99290fa3c564
parent722f6f62a563108dc0f311bd86120b8fbfa0c6df (diff)
gfs2: Get rid of flush_delayed_work in gfs2_evict_inode
So far, gfs2_evict_inode clears gl->gl_object and then flushes the glock work queue to make sure that inode glops which dereference gl->gl_object have finished running before the inode is destroyed. However, flushing the work queue may do more work than needed, and in particular, it may call into DLM, which we want to avoid here. Use a bit lock (GIF_GLOP_PENDING) to synchronize between the inode glops and gfs2_evict_inode instead to get rid of the flushing. In addition, flush the work queues of existing glocks before reusing them for new inodes to get those glocks into a known state: the glock state engine currently doesn't handle glock re-appropriation correctly. (We may be able to fix the glock state engine instead later.) Based on a patch by Steven Whitehouse <swhiteho@redhat.com>. Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com> Signed-off-by: Bob Peterson <rpeterso@redhat.com>
-rw-r--r--fs/gfs2/glock.h7
-rw-r--r--fs/gfs2/glops.c39
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/gfs2/inode.c7
-rw-r--r--fs/gfs2/super.c4
5 files changed, 46 insertions, 12 deletions
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index ab1ef322f7a5..9ad4a6ac6c84 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -257,4 +257,11 @@ static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
257 return gh->gh_gl; 257 return gh->gh_gl;
258} 258}
259 259
260static inline void glock_set_object(struct gfs2_glock *gl, void *object)
261{
262 spin_lock(&gl->gl_lockref.lock);
263 gl->gl_object = object;
264 spin_unlock(&gl->gl_lockref.lock);
265}
266
260#endif /* __GLOCK_DOT_H__ */ 267#endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5db59d444838..7449b19135c3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -197,6 +197,27 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
197 rgd->rd_flags &= ~GFS2_RDF_UPTODATE; 197 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
198} 198}
199 199
200static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
201{
202 struct gfs2_inode *ip;
203
204 spin_lock(&gl->gl_lockref.lock);
205 ip = gl->gl_object;
206 if (ip)
207 set_bit(GIF_GLOP_PENDING, &ip->i_flags);
208 spin_unlock(&gl->gl_lockref.lock);
209 return ip;
210}
211
212static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
213{
214 if (!ip)
215 return;
216
217 clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
218 wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
219}
220
200/** 221/**
201 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock 222 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
202 * @gl: the glock protecting the inode 223 * @gl: the glock protecting the inode
@@ -205,25 +226,24 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
205 226
206static void inode_go_sync(struct gfs2_glock *gl) 227static void inode_go_sync(struct gfs2_glock *gl)
207{ 228{
208 struct gfs2_inode *ip = gl->gl_object; 229 struct gfs2_inode *ip = gfs2_glock2inode(gl);
230 int isreg = ip && S_ISREG(ip->i_inode.i_mode);
209 struct address_space *metamapping = gfs2_glock2aspace(gl); 231 struct address_space *metamapping = gfs2_glock2aspace(gl);
210 int error; 232 int error;
211 233
212 if (ip && !S_ISREG(ip->i_inode.i_mode)) 234 if (isreg) {
213 ip = NULL;
214 if (ip) {
215 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) 235 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
216 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); 236 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
217 inode_dio_wait(&ip->i_inode); 237 inode_dio_wait(&ip->i_inode);
218 } 238 }
219 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 239 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
220 return; 240 goto out;
221 241
222 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); 242 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
223 243
224 gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH); 244 gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
225 filemap_fdatawrite(metamapping); 245 filemap_fdatawrite(metamapping);
226 if (ip) { 246 if (isreg) {
227 struct address_space *mapping = ip->i_inode.i_mapping; 247 struct address_space *mapping = ip->i_inode.i_mapping;
228 filemap_fdatawrite(mapping); 248 filemap_fdatawrite(mapping);
229 error = filemap_fdatawait(mapping); 249 error = filemap_fdatawait(mapping);
@@ -238,6 +258,9 @@ static void inode_go_sync(struct gfs2_glock *gl)
238 */ 258 */
239 smp_mb__before_atomic(); 259 smp_mb__before_atomic();
240 clear_bit(GLF_DIRTY, &gl->gl_flags); 260 clear_bit(GLF_DIRTY, &gl->gl_flags);
261
262out:
263 gfs2_clear_glop_pending(ip);
241} 264}
242 265
243/** 266/**
@@ -253,7 +276,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
253 276
254static void inode_go_inval(struct gfs2_glock *gl, int flags) 277static void inode_go_inval(struct gfs2_glock *gl, int flags)
255{ 278{
256 struct gfs2_inode *ip = gl->gl_object; 279 struct gfs2_inode *ip = gfs2_glock2inode(gl);
257 280
258 gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count)); 281 gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
259 282
@@ -274,6 +297,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
274 } 297 }
275 if (ip && S_ISREG(ip->i_inode.i_mode)) 298 if (ip && S_ISREG(ip->i_inode.i_mode))
276 truncate_inode_pages(ip->i_inode.i_mapping, 0); 299 truncate_inode_pages(ip->i_inode.i_mapping, 0);
300
301 gfs2_clear_glop_pending(ip);
277} 302}
278 303
279/** 304/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 638c0203f242..01af34cb589d 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -385,6 +385,7 @@ enum {
385 GIF_SW_PAGED = 3, 385 GIF_SW_PAGED = 3,
386 GIF_ORDERED = 4, 386 GIF_ORDERED = 4,
387 GIF_FREE_VFS_INODE = 5, 387 GIF_FREE_VFS_INODE = 5,
388 GIF_GLOP_PENDING = 6,
388}; 389};
389 390
390struct gfs2_inode { 391struct gfs2_inode {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9f605ea4810c..912d4e66fabc 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -144,7 +144,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
144 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); 144 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
145 if (unlikely(error)) 145 if (unlikely(error))
146 goto fail; 146 goto fail;
147 ip->i_gl->gl_object = ip; 147 flush_delayed_work(&ip->i_gl->gl_work);
148 glock_set_object(ip->i_gl, ip);
148 149
149 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); 150 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
150 if (unlikely(error)) 151 if (unlikely(error))
@@ -173,8 +174,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
173 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); 174 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
174 if (unlikely(error)) 175 if (unlikely(error))
175 goto fail_put; 176 goto fail_put;
176 177 flush_delayed_work(&ip->i_iopen_gh.gh_gl->gl_work);
177 ip->i_iopen_gh.gh_gl->gl_object = ip; 178 glock_set_object(ip->i_iopen_gh.gh_gl, ip);
178 gfs2_glock_put(io_gl); 179 gfs2_glock_put(io_gl);
179 io_gl = NULL; 180 io_gl = NULL;
180 181
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 29b0473f6e74..7d12c1232c42 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1631,8 +1631,8 @@ out:
1631 gfs2_ordered_del_inode(ip); 1631 gfs2_ordered_del_inode(ip);
1632 clear_inode(inode); 1632 clear_inode(inode);
1633 gfs2_dir_hash_inval(ip); 1633 gfs2_dir_hash_inval(ip);
1634 ip->i_gl->gl_object = NULL; 1634 glock_set_object(ip->i_gl, NULL);
1635 flush_delayed_work(&ip->i_gl->gl_work); 1635 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1636 gfs2_glock_add_to_lru(ip->i_gl); 1636 gfs2_glock_add_to_lru(ip->i_gl);
1637 gfs2_glock_put(ip->i_gl); 1637 gfs2_glock_put(ip->i_gl);
1638 ip->i_gl = NULL; 1638 ip->i_gl = NULL;