-rw-r--r--  fs/gfs2/bmap.c      2
-rw-r--r--  fs/gfs2/dir.c       7
-rw-r--r--  fs/gfs2/glock.c   124
-rw-r--r--  fs/gfs2/glock.h     7
-rw-r--r--  fs/gfs2/glops.c    56
-rw-r--r--  fs/gfs2/incore.h    5
-rw-r--r--  fs/gfs2/inode.c    19
-rw-r--r--  fs/gfs2/log.c       3
-rw-r--r--  fs/gfs2/lops.c      6
-rw-r--r--  fs/gfs2/main.c      1
-rw-r--r--  fs/gfs2/rgrp.c      6
-rw-r--r--  fs/gfs2/super.c    33
-rw-r--r--  fs/gfs2/sys.c       4
-rw-r--r--  fs/gfs2/xattr.c     4
14 files changed, 175 insertions, 102 deletions
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 4d810be532dd..9fa3aef9a5b3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -970,7 +970,7 @@ more_rgrps:
                         continue;
                 bn = be64_to_cpu(*p);
                 if (gfs2_holder_initialized(rd_gh)) {
-                        rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+                        rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
                         gfs2_assert_withdraw(sdp,
                                 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
                 } else {
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 79113219be5f..db427658ccd9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1444,7 +1444,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
                                 "g.offset (%u)\n",
                                 (unsigned long long)bh->b_blocknr,
                                 entries2, g.offset);
-
+                        gfs2_consist_inode(ip);
                         error = -EIO;
                         goto out_free;
                 }
@@ -1612,6 +1612,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
                         (unsigned long long)dip->i_no_addr,
                         dip->i_entries,
                         g.offset);
+                gfs2_consist_inode(dip);
                 error = -EIO;
                 goto out;
         }
@@ -2031,8 +2032,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
         gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
         for (x = 0; x < rlist.rl_rgrps; x++) {
-                struct gfs2_rgrpd *rgd;
-                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+                struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
                 rg_blocks += rgd->rd_length;
         }
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 959a19ced4d5..6cd71c50b8bd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -152,20 +152,34 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
         spin_unlock(&lru_lock);
 }
 
-/**
- * gfs2_glock_put() - Decrement reference count on glock
- * @gl: The glock to put
- *
+/*
+ * Enqueue the glock on the work queue.  Passes one glock reference on to the
+ * work queue.
  */
+static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
+                /*
+                 * We are holding the lockref spinlock, and the work was still
+                 * queued above.  The queued work (glock_work_func) takes that
+                 * spinlock before dropping its glock reference(s), so it
+                 * cannot have dropped them in the meantime.
+                 */
+                GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
+                gl->gl_lockref.count--;
+        }
+}
 
-void gfs2_glock_put(struct gfs2_glock *gl)
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+        spin_lock(&gl->gl_lockref.lock);
+        __gfs2_glock_queue_work(gl, delay);
+        spin_unlock(&gl->gl_lockref.lock);
+}
+
+static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct address_space *mapping = gfs2_glock2aspace(gl);
 
-        if (lockref_put_or_lock(&gl->gl_lockref))
-                return;
-
         lockref_mark_dead(&gl->gl_lockref);
 
         gfs2_glock_remove_from_lru(gl);
@@ -178,6 +192,20 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 }
 
 /**
+ * gfs2_glock_put() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ */
+
+void gfs2_glock_put(struct gfs2_glock *gl)
+{
+        if (lockref_put_or_lock(&gl->gl_lockref))
+                return;
+
+        __gfs2_glock_put(gl);
+}
+
+/**
  * may_grant - check if its ok to grant a new lock
  * @gl: The glock
  * @gh: The lock request which we wish to grant
@@ -482,8 +510,7 @@ __acquires(&gl->gl_lockref.lock)
                     target == LM_ST_UNLOCKED &&
                     test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
                         finish_xmote(gl, target);
-                        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                                gfs2_glock_put(gl);
+                        gfs2_glock_queue_work(gl, 0);
                 }
                 else if (ret) {
                         pr_err("lm_lock ret %d\n", ret);
@@ -492,8 +519,7 @@ __acquires(&gl->gl_lockref.lock)
                 }
         } else { /* lock_nolock */
                 finish_xmote(gl, target);
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                        gfs2_glock_put(gl);
+                gfs2_glock_queue_work(gl, 0);
         }
 
         spin_lock(&gl->gl_lockref.lock);
@@ -565,8 +591,7 @@ out_sched:
         clear_bit(GLF_LOCK, &gl->gl_flags);
         smp_mb__after_atomic();
         gl->gl_lockref.count++;
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                gl->gl_lockref.count--;
+        __gfs2_glock_queue_work(gl, 0);
         return;
 
 out_unlock:
@@ -601,11 +626,11 @@ static void glock_work_func(struct work_struct *work)
 {
         unsigned long delay = 0;
         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
-        int drop_ref = 0;
+        unsigned int drop_refs = 1;
 
         if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                 finish_xmote(gl, gl->gl_reply);
-                drop_ref = 1;
+                drop_refs++;
         }
         spin_lock(&gl->gl_lockref.lock);
         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -623,17 +648,25 @@ static void glock_work_func(struct work_struct *work)
                 }
         }
         run_queue(gl, 0);
-        spin_unlock(&gl->gl_lockref.lock);
-        if (!delay)
-                gfs2_glock_put(gl);
-        else {
+        if (delay) {
+                /* Keep one glock reference for the work we requeue. */
+                drop_refs--;
                 if (gl->gl_name.ln_type != LM_TYPE_INODE)
                         delay = 0;
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-                        gfs2_glock_put(gl);
+                __gfs2_glock_queue_work(gl, delay);
         }
-        if (drop_ref)
-                gfs2_glock_put(gl);
+
+        /*
+         * Drop the remaining glock references manually here. (Mind that
+         * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+         * here as well.)
+         */
+        gl->gl_lockref.count -= drop_refs;
+        if (!gl->gl_lockref.count) {
+                __gfs2_glock_put(gl);
+                return;
+        }
+        spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -986,8 +1019,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
                      test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                 gl->gl_lockref.count++;
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                        gl->gl_lockref.count--;
+                __gfs2_glock_queue_work(gl, 0);
         }
         run_queue(gl, 1);
         spin_unlock(&gl->gl_lockref.lock);
@@ -1047,17 +1079,15 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                 gfs2_glock_add_to_lru(gl);
 
         trace_gfs2_glock_queue(gh, 0);
+        if (unlikely(!fast_path)) {
+                gl->gl_lockref.count++;
+                if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+                    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+                    gl->gl_name.ln_type == LM_TYPE_INODE)
+                        delay = gl->gl_hold_time;
+                __gfs2_glock_queue_work(gl, delay);
+        }
         spin_unlock(&gl->gl_lockref.lock);
-        if (likely(fast_path))
-                return;
-
-        gfs2_glock_hold(gl);
-        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
-            !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
-            gl->gl_name.ln_type == LM_TYPE_INODE)
-                delay = gl->gl_hold_time;
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-                gfs2_glock_put(gl);
 }
 
 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
@@ -1233,9 +1263,8 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 
         spin_lock(&gl->gl_lockref.lock);
         handle_callback(gl, state, delay, true);
+        __gfs2_glock_queue_work(gl, delay);
         spin_unlock(&gl->gl_lockref.lock);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-                gfs2_glock_put(gl);
 }
 
 /**
@@ -1294,10 +1323,8 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
         gl->gl_lockref.count++;
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+        __gfs2_glock_queue_work(gl, 0);
         spin_unlock(&gl->gl_lockref.lock);
-
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                gfs2_glock_put(gl);
 }
 
 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1355,8 +1382,7 @@ add_back_to_lru:
                 if (demote_ok(gl))
                         handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                        gl->gl_lockref.count--;
+                __gfs2_glock_queue_work(gl, 0);
                 spin_unlock(&gl->gl_lockref.lock);
                 cond_resched_lock(&lru_lock);
         }
@@ -1462,13 +1488,12 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
-        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-                goto out;
-        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
-out:
+        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
                 gfs2_glock_put(gl);
+                return;
         }
+        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+        gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -1484,9 +1509,8 @@ static void clear_glock(struct gfs2_glock *gl)
         spin_lock(&gl->gl_lockref.lock);
         if (gl->gl_state != LM_ST_UNLOCKED)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+        __gfs2_glock_queue_work(gl, 0);
         spin_unlock(&gl->gl_lockref.lock);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                gfs2_glock_put(gl);
 }
 
 /**
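
A note on the pattern above: throughout glock.c, the open-coded sequence
"if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) gfs2_glock_put(gl);"
is replaced by __gfs2_glock_queue_work() / gfs2_glock_queue_work(), which hand one
glock reference to the work queue and, when the work turns out to be queued already,
give that reference back while still holding the lockref spinlock.  Below is a rough
userspace sketch of that contract (an analogy only, not GFS2 code; "struct obj" and
"queue_work_locked()" are invented names):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;   /* stands in for gl_lockref.lock */
        int count;              /* stands in for gl_lockref.count */
        bool work_queued;       /* stands in for the delayed work being queued */
};

/* Caller holds obj->lock and has already taken one reference for the work. */
static void queue_work_locked(struct obj *o)
{
        if (o->work_queued) {
                /*
                 * The work is already queued and still owns its own
                 * reference, so the count must be at least 2 before we
                 * give our extra reference back.
                 */
                assert(o->count >= 2);
                o->count--;
        } else {
                o->work_queued = true;
        }
}

int main(void)
{
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1, false };

        pthread_mutex_lock(&o.lock);
        o.count++;              /* reference handed over to the work */
        queue_work_locked(&o);  /* first call: work gets queued, keeps the ref */
        o.count++;              /* another caller takes another reference */
        queue_work_locked(&o);  /* already queued: the extra ref is dropped */
        pthread_mutex_unlock(&o.lock);

        printf("count=%d, queued=%d\n", o.count, (int)o.work_queued);
        return 0;
}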
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index ab1ef322f7a5..9ad4a6ac6c84 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -257,4 +257,11 @@ static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
         return gh->gh_gl;
 }
 
+static inline void glock_set_object(struct gfs2_glock *gl, void *object)
+{
+        spin_lock(&gl->gl_lockref.lock);
+        gl->gl_object = object;
+        spin_unlock(&gl->gl_lockref.lock);
+}
+
 #endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5db59d444838..5e69636d4dd3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -137,7 +137,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock.  We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
@@ -184,7 +184,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct address_space *mapping = &sdp->sd_aspace;
-        struct gfs2_rgrpd *rgd = gl->gl_object;
+        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 
         if (rgd)
                 gfs2_rgrp_brelse(rgd);
@@ -197,6 +197,38 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
                 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 }
 
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+        struct gfs2_inode *ip;
+
+        spin_lock(&gl->gl_lockref.lock);
+        ip = gl->gl_object;
+        if (ip)
+                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+        spin_unlock(&gl->gl_lockref.lock);
+        return ip;
+}
+
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+        struct gfs2_rgrpd *rgd;
+
+        spin_lock(&gl->gl_lockref.lock);
+        rgd = gl->gl_object;
+        spin_unlock(&gl->gl_lockref.lock);
+
+        return rgd;
+}
+
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+        if (!ip)
+                return;
+
+        clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+        wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
+}
+
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
@@ -205,25 +237,24 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 
 static void inode_go_sync(struct gfs2_glock *gl)
 {
-        struct gfs2_inode *ip = gl->gl_object;
+        struct gfs2_inode *ip = gfs2_glock2inode(gl);
+        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
         struct address_space *metamapping = gfs2_glock2aspace(gl);
         int error;
 
-        if (ip && !S_ISREG(ip->i_inode.i_mode))
-                ip = NULL;
-        if (ip) {
+        if (isreg) {
                 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                         unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                 inode_dio_wait(&ip->i_inode);
         }
         if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-                return;
+                goto out;
 
         GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
         gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
         filemap_fdatawrite(metamapping);
-        if (ip) {
+        if (isreg) {
                 struct address_space *mapping = ip->i_inode.i_mapping;
                 filemap_fdatawrite(mapping);
                 error = filemap_fdatawait(mapping);
@@ -238,6 +269,9 @@ static void inode_go_sync(struct gfs2_glock *gl)
          */
         smp_mb__before_atomic();
         clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+        gfs2_clear_glop_pending(ip);
 }
 
 /**
@@ -253,7 +287,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
-        struct gfs2_inode *ip = gl->gl_object;
+        struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
         gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
@@ -274,6 +308,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
         }
         if (ip && S_ISREG(ip->i_inode.i_mode))
                 truncate_inode_pages(ip->i_inode.i_mapping, 0);
+
+        gfs2_clear_glop_pending(ip);
 }
 
 /**
@@ -541,7 +577,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-        struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+        struct gfs2_inode *ip = gl->gl_object;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
         if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
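
A note on the new helpers above: gfs2_glock2inode() marks GIF_GLOP_PENDING under the
lockref spinlock while it hands out the gl_object pointer, and gfs2_clear_glop_pending()
clears the bit and wakes any waiter once the glock operation is done; the eviction path
in super.c (further down) then waits on that bit instead of flushing the glock work queue.
Below is a rough userspace sketch of the same handshake (an analogy only, not GFS2 code;
all names in it are invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool glop_pending;       /* stands in for GIF_GLOP_PENDING */
static int object = 42;         /* stands in for the inode behind the glock */

/* A glock operation that was already in flight when eviction started. */
static void *glock_op(void *arg)
{
        (void)arg;
        printf("glock op sees object = %d\n", object);

        pthread_mutex_lock(&lock);
        glop_pending = false;           /* like gfs2_clear_glop_pending() */
        pthread_cond_broadcast(&done);  /* like wake_up_bit() */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        glop_pending = true;            /* like gfs2_glock2inode() setting the bit */
        pthread_create(&t, NULL, glock_op, NULL);

        /* Eviction side: wait for pending glock operations before teardown. */
        pthread_mutex_lock(&lock);
        while (glop_pending)
                pthread_cond_wait(&done, &lock);
        object = 0;                     /* now safe to tear the object down */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        printf("evicted, object = %d\n", object);
        return 0;
}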
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index aa3d44527fa2..790e73984288 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -336,7 +336,6 @@ enum {
 };
 
 struct gfs2_glock {
-        struct hlist_bl_node gl_list;
         unsigned long gl_flags;         /* GLF_... */
         struct lm_lockname gl_name;
 
@@ -386,6 +385,7 @@ enum {
         GIF_SW_PAGED = 3,
         GIF_ORDERED = 4,
         GIF_FREE_VFS_INODE = 5,
+        GIF_GLOP_PENDING = 6,
 };
 
 struct gfs2_inode {
@@ -820,7 +820,6 @@ struct gfs2_sbd {
         wait_queue_head_t sd_reserving_log_wait;
 
         unsigned int sd_log_flush_head;
-        u64 sd_log_flush_wrapped;
 
         spinlock_t sd_ail_lock;
         struct list_head sd_ail1_list;
@@ -857,5 +856,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
         preempt_enable();
 }
 
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
 #endif /* __INCORE_DOT_H__ */
 
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9f605ea4810c..acca501f8110 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -144,7 +144,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                 if (unlikely(error))
                         goto fail;
-                ip->i_gl->gl_object = ip;
+                flush_delayed_work(&ip->i_gl->gl_work);
+                glock_set_object(ip->i_gl, ip);
 
                 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                 if (unlikely(error))
@@ -173,8 +174,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                 if (unlikely(error))
                         goto fail_put;
-
-                ip->i_iopen_gh.gh_gl->gl_object = ip;
+                flush_delayed_work(&ip->i_iopen_gh.gh_gl->gl_work);
+                glock_set_object(ip->i_iopen_gh.gh_gl, ip);
                 gfs2_glock_put(io_gl);
                 io_gl = NULL;
 
@@ -201,14 +202,14 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 fail_refresh:
         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-        ip->i_iopen_gh.gh_gl->gl_object = NULL;
+        glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
         gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
         if (io_gl)
                 gfs2_glock_put(io_gl);
         if (gfs2_holder_initialized(&i_gh))
                 gfs2_glock_dq_uninit(&i_gh);
-        ip->i_gl->gl_object = NULL;
+        glock_set_object(ip->i_gl, NULL);
 fail:
         iget_failed(inode);
         return ERR_PTR(error);
@@ -607,6 +608,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
         if (error)
                 goto fail;
+        gfs2_holder_mark_uninitialized(ghs + 1);
 
         error = create_ok(dip, name, mode);
         if (error)
@@ -705,7 +707,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         if (error)
                 goto fail_free_inode;
 
-        ip->i_gl->gl_object = ip;
+        glock_set_object(ip->i_gl, ip);
         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
         if (error)
                 goto fail_free_inode;
@@ -731,7 +733,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         if (error)
                 goto fail_gunlock2;
 
-        ip->i_iopen_gh.gh_gl->gl_object = ip;
+        glock_set_object(ip->i_iopen_gh.gh_gl, ip);
         gfs2_glock_put(io_gl);
         gfs2_set_iop(inode);
         insert_inode_hash(inode);
@@ -778,7 +780,6 @@ fail_gunlock3:
 fail_gunlock2:
         if (io_gl)
                 clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
-        gfs2_glock_dq_uninit(ghs + 1);
 fail_free_inode:
         if (ip->i_gl)
                 gfs2_glock_put(ip->i_gl);
@@ -799,6 +800,8 @@ fail_gunlock:
                            &GFS2_I(inode)->i_flags);
                 iput(inode);
         }
+        if (gfs2_holder_initialized(ghs + 1))
+                gfs2_glock_dq_uninit(ghs + 1);
 fail:
         return error;
 }
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index d2955daf17a4..9a624f694400 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -722,7 +722,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
                 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
         sdp->sd_log_flush_head = sdp->sd_log_head;
-        sdp->sd_log_flush_wrapped = 0;
         tr = sdp->sd_log_tr;
         if (tr) {
                 sdp->sd_log_tr = NULL;
@@ -775,7 +774,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
                 }
                 atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                 trace_gfs2_log_blocks(sdp, -1);
-                sdp->sd_log_flush_wrapped = 0;
                 log_write_header(sdp, 0);
                 sdp->sd_log_head = sdp->sd_log_flush_head;
         }
@@ -880,7 +878,6 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
         gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 
         sdp->sd_log_flush_head = sdp->sd_log_head;
-        sdp->sd_log_flush_wrapped = 0;
 
         log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
 
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 885d36e7a29f..e5259cd92ea4 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -71,7 +71,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 {
         struct gfs2_glock *gl = bd->bd_gl;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-        struct gfs2_rgrpd *rgd = gl->gl_object;
+        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
         unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
         struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
@@ -134,10 +134,8 @@ static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
         BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
                (sdp->sd_log_flush_head != sdp->sd_log_head));
 
-        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
+        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
                 sdp->sd_log_flush_head = 0;
-                sdp->sd_log_flush_wrapped = 1;
-        }
 }
 
 static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 67d1fc4668f7..0a89e6f7a314 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,7 +52,6 @@ static void gfs2_init_glock_once(void *foo)
 {
         struct gfs2_glock *gl = foo;
 
-        INIT_HLIST_BL_NODE(&gl->gl_list);
         spin_lock_init(&gl->gl_lockref.lock);
         INIT_LIST_HEAD(&gl->gl_holders);
         INIT_LIST_HEAD(&gl->gl_lru);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 83c9909ff14a..836e38ba5d0a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -705,9 +705,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
                 rb_erase(n, &sdp->sd_rindex_tree);
 
                 if (gl) {
-                        spin_lock(&gl->gl_lockref.lock);
-                        gl->gl_object = NULL;
-                        spin_unlock(&gl->gl_lockref.lock);
+                        glock_set_object(gl, NULL);
                         gfs2_glock_add_to_lru(gl);
                         gfs2_glock_put(gl);
                 }
@@ -917,7 +915,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
         error = rgd_insert(rgd);
         spin_unlock(&sdp->sd_rindex_spin);
         if (!error) {
-                rgd->rd_gl->gl_object = rgd;
+                glock_set_object(rgd->rd_gl, rgd);
                 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
                 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
                                                     rgd->rd_length) * bsize) - 1;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 29b0473f6e74..fdedec379b78 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1105,9 +1105,12 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
                                 gfs2_holder_uninit(gh);
                                 error = err;
                         } else {
-                                if (!error)
-                                        error = statfs_slow_fill(
-                                                gh->gh_gl->gl_object, sc);
+                                if (!error) {
+                                        struct gfs2_rgrpd *rgd =
+                                                gfs2_glock2rgrp(gh->gh_gl);
+
+                                        error = statfs_slow_fill(rgd, sc);
+                                }
                                 gfs2_glock_dq_uninit(gh);
                         }
                 }
@@ -1535,6 +1538,12 @@ static void gfs2_evict_inode(struct inode *inode)
         if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
                 goto out;
 
+        if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
+                BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
+                gfs2_holder_mark_uninitialized(&gh);
+                goto alloc_failed;
+        }
+
         /* Must not read inode block until block type has been verified */
         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
         if (unlikely(error)) {
@@ -1543,11 +1552,9 @@ static void gfs2_evict_inode(struct inode *inode)
                 goto out;
         }
 
-        if (!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
-                error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
-                if (error)
-                        goto out_truncate;
-        }
+        error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
+        if (error)
+                goto out_truncate;
 
         if (test_bit(GIF_INVALID, &ip->i_flags)) {
                 error = gfs2_inode_refresh(ip);
@@ -1555,6 +1562,7 @@ static void gfs2_evict_inode(struct inode *inode)
                         goto out_truncate;
         }
 
+alloc_failed:
         if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
             test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
@@ -1621,7 +1629,8 @@ out_unlock:
                 }
                 gfs2_holder_uninit(&ip->i_iopen_gh);
         }
-        gfs2_glock_dq_uninit(&gh);
+        if (gfs2_holder_initialized(&gh))
+                gfs2_glock_dq_uninit(&gh);
         if (error && error != GLR_TRYFAILED && error != -EROFS)
                 fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
@@ -1631,13 +1640,13 @@ out:
         gfs2_ordered_del_inode(ip);
         clear_inode(inode);
         gfs2_dir_hash_inval(ip);
-        ip->i_gl->gl_object = NULL;
-        flush_delayed_work(&ip->i_gl->gl_work);
+        glock_set_object(ip->i_gl, NULL);
+        wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
         gfs2_glock_add_to_lru(ip->i_gl);
         gfs2_glock_put(ip->i_gl);
         ip->i_gl = NULL;
         if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-                ip->i_iopen_gh.gh_gl->gl_object = NULL;
+                glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
                 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
         }
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index e77bc52b468f..ca1f97ff898c 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -626,12 +626,12 @@ static struct attribute *tune_attrs[] = {
         NULL,
 };
 
-static struct attribute_group tune_group = {
+static const struct attribute_group tune_group = {
         .name = "tune",
         .attrs = tune_attrs,
 };
 
-static struct attribute_group lock_module_group = {
+static const struct attribute_group lock_module_group = {
         .name = "lock_module",
         .attrs = lock_module_attrs,
 };
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index d87721aeb575..54179554c7d2 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1327,8 +1327,8 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
         gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
         for (x = 0; x < rlist.rl_rgrps; x++) {
-                struct gfs2_rgrpd *rgd;
-                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+                struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
                 rg_blocks += rgd->rd_length;
         }
 