author		Andreas Gruenbacher <agruenba@redhat.com>	2017-06-30 08:55:08 -0400
committer	Bob Peterson <rpeterso@redhat.com>	2017-07-05 08:20:52 -0400
commit		6f6597baae206c544c49ad7f1129d5adc1e9019d (patch)
tree		3bcfcb1ebf6713cdd95f4146012c236e3d6f5fbf
parent		4fd1a5795214bc6405f14691c1344ae8c3f17215 (diff)
gfs2: Protect gl->gl_object by spin lock
Put all remaining accesses to gl->gl_object under the
gl->gl_lockref.lock spinlock to prevent races.
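
For reference, the write side of this change goes through a glock_set_object()
helper that is not defined in this diff. Below is a minimal sketch of what that
setter is assumed to look like, mirroring the gfs2_glock2rgrp() reader added in
glops.c; the real helper lives outside this patch (presumably in glock.h):

	/*
	 * Assumed helper, not part of this diff: set or clear the
	 * gl->gl_object back-pointer under gl->gl_lockref.lock so that
	 * lookups taking the same lock cannot race with setup/teardown.
	 */
	static inline void glock_set_object(struct gfs2_glock *gl, void *object)
	{
		spin_lock(&gl->gl_lockref.lock);
		gl->gl_object = object;
		spin_unlock(&gl->gl_lockref.lock);
	}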
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
-rw-r--r--	fs/gfs2/bmap.c		 2
-rw-r--r--	fs/gfs2/dir.c		 4
-rw-r--r--	fs/gfs2/glops.c		17
-rw-r--r--	fs/gfs2/incore.h	 2
-rw-r--r--	fs/gfs2/inode.c		 8
-rw-r--r--	fs/gfs2/lops.c		 2
-rw-r--r--	fs/gfs2/rgrp.c		 6
-rw-r--r--	fs/gfs2/super.c		11
-rw-r--r--	fs/gfs2/xattr.c		 4
9 files changed, 35 insertions(+), 21 deletions(-)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 3814a60e0aea..56e2943ff994 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -970,7 +970,7 @@ more_rgrps:
 			continue;
 		bn = be64_to_cpu(*p);
 		if (gfs2_holder_initialized(rd_gh)) {
-			rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+			rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
 			gfs2_assert_withdraw(sdp,
				gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
 		} else {
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 96a7487b09b6..db427658ccd9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -2032,8 +2032,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 7449b19135c3..5e69636d4dd3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -137,7 +137,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock. We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
@@ -184,7 +184,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 
 	if (rgd)
 		gfs2_rgrp_brelse(rgd);
@@ -209,6 +209,17 @@ static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
 	return ip;
 }
 
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+	struct gfs2_rgrpd *rgd;
+
+	spin_lock(&gl->gl_lockref.lock);
+	rgd = gl->gl_object;
+	spin_unlock(&gl->gl_lockref.lock);
+
+	return rgd;
+}
+
 static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
 {
 	if (!ip)
@@ -566,7 +577,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 01af34cb589d..de4238493d80 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -857,5 +857,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 	preempt_enable();
 }
 
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
 #endif /* __INCORE_DOT_H__ */
 
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 912d4e66fabc..50108fa724c7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -202,14 +202,14 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 fail_refresh:
 	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
-	ip->i_gl->gl_object = NULL;
+	glock_set_object(ip->i_gl, NULL);
 fail:
 	iget_failed(inode);
 	return ERR_PTR(error);
@@ -706,7 +706,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_free_inode;
 
-	ip->i_gl->gl_object = ip;
+	glock_set_object(ip->i_gl, ip);
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
 	if (error)
 		goto fail_free_inode;
@@ -732,7 +732,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
-	ip->i_iopen_gh.gh_gl->gl_object = ip;
+	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 	gfs2_glock_put(io_gl);
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 2d4ce25a105f..b50106bf2902 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -71,7 +71,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 {
 	struct gfs2_glock *gl = bd->bd_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 83c9909ff14a..836e38ba5d0a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -705,9 +705,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 		rb_erase(n, &sdp->sd_rindex_tree);
 
 		if (gl) {
-			spin_lock(&gl->gl_lockref.lock);
-			gl->gl_object = NULL;
-			spin_unlock(&gl->gl_lockref.lock);
+			glock_set_object(gl, NULL);
 			gfs2_glock_add_to_lru(gl);
 			gfs2_glock_put(gl);
 		}
@@ -917,7 +915,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	error = rgd_insert(rgd);
 	spin_unlock(&sdp->sd_rindex_spin);
 	if (!error) {
-		rgd->rd_gl->gl_object = rgd;
+		glock_set_object(rgd->rd_gl, rgd);
 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 7d12c1232c42..bd5ad6c5514e 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1105,9 +1105,12 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
				gfs2_holder_uninit(gh);
				error = err;
			} else {
-				if (!error)
-					error = statfs_slow_fill(
-						gh->gh_gl->gl_object, sc);
+				if (!error) {
+					struct gfs2_rgrpd *rgd =
+						gfs2_glock2rgrp(gh->gh_gl);
+
+					error = statfs_slow_fill(rgd, sc);
+				}
				gfs2_glock_dq_uninit(gh);
			}
		}
@@ -1637,7 +1640,7 @@ out:
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		ip->i_iopen_gh.gh_gl->gl_object = NULL;
+		glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 	}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index d87721aeb575..54179554c7d2 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1327,8 +1327,8 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 