author     Steven Whitehouse <swhiteho@redhat.com>    2011-04-14 11:50:31 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2011-04-20 04:01:17 -0400
commit     f42ab0852946c1fb5103682c5897eb3da908e4b0
tree       3847b23d2cac6bab422e6e001e0c6d6c66a81f1e
parent     627c10b7e471b5dcfb7101d6cc74d219619c9bc4
GFS2: Optimise glock lru and end of life inodes
The GLF_LRU flag introduced in the previous patch can be
used to check whether a glock is on the lru list when a new
holder is queued and, if so, to remove it without first
having to take the lru_lock.
The main purpose of this patch, however, is to optimise the
glocks left over when an inode at the end of its life is being
evicted. Previously such glocks were left with the GLF_LFLUSH
flag set, so that each one required a log flush when reclaimed.
This patch resets the GLF_LFLUSH flag when there is nothing
left to flush, thus preventing later log flushes as glocks are
reused or demoted.
In order to do this, we need to keep track of the number of
outstanding revokes, and also to clear the GLF_LFLUSH bit
after a log commit when only revokes have been processed.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
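
[Editor's note] The LRU fast path described above reduces to the pattern
below. This is a condensed excerpt of the fs/gfs2/glock.c hunks that follow,
not a complete function; all names come from the patch itself.

/*
 * New helper: removes a glock from the LRU list under lru_lock and clears
 * the GLF_LRU flag so that the lock-free test below stays accurate.
 */
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
        spin_unlock(&lru_lock);
}

/*
 * In gfs2_glock_nq(), the GLF_LRU bit is tested without taking lru_lock;
 * the lock is only acquired when the glock really is on the LRU list.
 */
if (test_bit(GLF_LRU, &gl->gl_flags))
        gfs2_glock_remove_from_lru(gl);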
 fs/gfs2/glock.c  | 41
 fs/gfs2/incore.h |  1
 fs/gfs2/inode.c  | 59
 fs/gfs2/inode.h  |  1
 fs/gfs2/lops.c   | 27
 fs/gfs2/main.c   |  1
 fs/gfs2/super.c  | 78
 7 files changed, 119 insertions(+), 89 deletions(-)
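
[Editor's note] The revoke accounting that allows GLF_LFLUSH to be cleared
early boils down to the sketch below. The gl_revokes counter and the flag
operations are taken from the patch; the wrapper names (revoke_add,
revoke_after_commit, final_release) are illustrative only -- the real logic
lives in revoke_lo_add(), revoke_lo_after_commit() and
gfs2_final_release_pages() in the hunks that follow.

/* Illustrative sketch only -- see revoke_lo_add() below. */
static void revoke_add(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_revokes);           /* one more outstanding revoke */
        set_bit(GLF_LFLUSH, &gl->gl_flags);    /* glock still needs a log flush */
}

/* Illustrative sketch only -- see revoke_lo_after_commit() below. */
static void revoke_after_commit(struct gfs2_glock *gl)
{
        atomic_dec(&gl->gl_revokes);           /* revoke has reached the journal */
        clear_bit(GLF_LFLUSH, &gl->gl_flags);  /* nothing left to flush for it */
}

/* Illustrative sketch only -- see gfs2_final_release_pages() below. */
static void final_release(struct gfs2_glock *gl)
{
        if (atomic_read(&gl->gl_revokes) == 0) {
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                clear_bit(GLF_DIRTY, &gl->gl_flags);
        }
}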
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0c6c69090140..cb8776f0102e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -145,14 +145,9 @@ static int demote_ok(const struct gfs2_glock *gl)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
 
-        /* assert_spin_locked(&gl->gl_spin); */
-
         if (gl->gl_state == LM_ST_UNLOCKED)
                 return 0;
-        if (test_bit(GLF_LFLUSH, &gl->gl_flags))
-                return 0;
-        if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
-            !list_empty(&gl->gl_holders))
+        if (!list_empty(&gl->gl_holders))
                 return 0;
         if (glops->go_demote_ok)
                 return glops->go_demote_ok(gl);
@@ -174,6 +169,17 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
         spin_unlock(&lru_lock);
 }
 
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+        spin_lock(&lru_lock);
+        if (!list_empty(&gl->gl_lru)) {
+                list_del_init(&gl->gl_lru);
+                atomic_dec(&lru_count);
+                clear_bit(GLF_LRU, &gl->gl_flags);
+        }
+        spin_unlock(&lru_lock);
+}
+
 /**
  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
@@ -217,12 +223,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
         spin_lock_bucket(gl->gl_hash);
         hlist_bl_del_rcu(&gl->gl_list);
         spin_unlock_bucket(gl->gl_hash);
-        spin_lock(&lru_lock);
-        if (!list_empty(&gl->gl_lru)) {
-                list_del_init(&gl->gl_lru);
-                atomic_dec(&lru_count);
-        }
-        spin_unlock(&lru_lock);
+        gfs2_glock_remove_from_lru(gl);
         GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
         GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
         trace_gfs2_glock_put(gl);
@@ -1025,6 +1026,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                 return -EIO;
 
+        if (test_bit(GLF_LRU, &gl->gl_flags))
+                gfs2_glock_remove_from_lru(gl);
+
         spin_lock(&gl->gl_spin);
         add_to_queue(gh);
         if ((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1082,7 +1086,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                     !test_bit(GLF_DEMOTE, &gl->gl_flags))
                         fast_path = 1;
         }
-        __gfs2_glock_schedule_for_reclaim(gl);
+        if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
+                __gfs2_glock_schedule_for_reclaim(gl);
         trace_gfs2_glock_queue(gh, 0);
         spin_unlock(&gl->gl_spin);
         if (likely(fast_path))
@@ -1461,12 +1466,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 static void clear_glock(struct gfs2_glock *gl)
 {
-        spin_lock(&lru_lock);
-        if (!list_empty(&gl->gl_lru)) {
-                list_del_init(&gl->gl_lru);
-                atomic_dec(&lru_count);
-        }
-        spin_unlock(&lru_lock);
+        gfs2_glock_remove_from_lru(gl);
 
         spin_lock(&gl->gl_spin);
         if (gl->gl_state != LM_ST_UNLOCKED)
@@ -1666,7 +1666,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
         dtime *= 1000000/HZ; /* demote time in uSec */
         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                 dtime = 0;
-        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
+        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
                   state2str(gl->gl_state),
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
@@ -1674,6 +1674,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
                   state2str(gl->gl_target),
                   state2str(gl->gl_demote_state), dtime,
                   atomic_read(&gl->gl_ail_count),
+                  atomic_read(&gl->gl_revokes),
                   atomic_read(&gl->gl_ref));
 
         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 48eb1eed51b5..5067beaffa68 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -236,6 +236,7 @@ struct gfs2_glock {
 
         struct list_head gl_ail_list;
         atomic_t gl_ail_count;
+        atomic_t gl_revokes;
         struct delayed_work gl_work;
         struct work_struct gl_delete;
         struct rcu_head gl_rcu;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 9134dcb89479..9b7b9e40073b 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -341,65 +341,6 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
         return error;
 }
 
-int gfs2_dinode_dealloc(struct gfs2_inode *ip)
-{
-        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-        struct gfs2_alloc *al;
-        struct gfs2_rgrpd *rgd;
-        int error;
-
-        if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
-                if (gfs2_consist_inode(ip))
-                        gfs2_dinode_print(ip);
-                return -EIO;
-        }
-
-        al = gfs2_alloc_get(ip);
-        if (!al)
-                return -ENOMEM;
-
-        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
-        if (error)
-                goto out;
-
-        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
-        if (error)
-                goto out_qs;
-
-        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
-        if (!rgd) {
-                gfs2_consist_inode(ip);
-                error = -EIO;
-                goto out_rindex_relse;
-        }
-
-        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-                                   &al->al_rgd_gh);
-        if (error)
-                goto out_rindex_relse;
-
-        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
-        if (error)
-                goto out_rg_gunlock;
-
-        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
-        set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);
-
-        gfs2_free_di(rgd, ip);
-
-        gfs2_trans_end(sdp);
-
-out_rg_gunlock:
-        gfs2_glock_dq_uninit(&al->al_rgd_gh);
-out_rindex_relse:
-        gfs2_glock_dq_uninit(&al->al_ri_gh);
-out_qs:
-        gfs2_quota_unhold(ip);
-out:
-        gfs2_alloc_put(ip);
-        return error;
-}
-
 /**
  * gfs2_change_nlink - Change nlink count on inode
  * @ip: The GFS2 inode
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 099ca305e518..842346eae836 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -106,7 +106,6 @@ extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
 
 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
 
-extern int gfs2_dinode_dealloc(struct gfs2_inode *inode);
 extern int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
 extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                                   int is_root);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 611a51d476b2..05bbb124699f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -320,12 +320,16 @@ static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 
 static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
 {
+        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
+        struct gfs2_glock *gl = bd->bd_gl;
         struct gfs2_trans *tr;
 
         tr = current->journal_info;
         tr->tr_touched = 1;
         tr->tr_num_revoke++;
         sdp->sd_log_num_revoke++;
+        atomic_inc(&gl->gl_revokes);
+        set_bit(GLF_LFLUSH, &gl->gl_flags);
         list_add(&le->le_list, &sdp->sd_log_le_revoke);
 }
 
@@ -348,9 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
         ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
         offset = sizeof(struct gfs2_log_descriptor);
 
-        while (!list_empty(head)) {
-                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
-                list_del_init(&bd->bd_le.le_list);
+        list_for_each_entry(bd, head, bd_le.le_list) {
                 sdp->sd_log_num_revoke--;
 
                 if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
@@ -365,8 +367,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
                 }
 
                 *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
-                kmem_cache_free(gfs2_bufdata_cachep, bd);
-
                 offset += sizeof(u64);
         }
         gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
@@ -374,6 +374,22 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
         submit_bh(WRITE_SYNC, bh);
 }
 
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+{
+        struct list_head *head = &sdp->sd_log_le_revoke;
+        struct gfs2_bufdata *bd;
+        struct gfs2_glock *gl;
+
+        while (!list_empty(head)) {
+                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
+                list_del_init(&bd->bd_le.le_list);
+                gl = bd->bd_gl;
+                atomic_dec(&gl->gl_revokes);
+                clear_bit(GLF_LFLUSH, &gl->gl_flags);
+                kmem_cache_free(gfs2_bufdata_cachep, bd);
+        }
+}
+
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                   struct gfs2_log_header_host *head, int pass)
 {
@@ -747,6 +763,7 @@ const struct gfs2_log_operations gfs2_buf_lops = {
 const struct gfs2_log_operations gfs2_revoke_lops = {
         .lo_add = revoke_lo_add,
         .lo_before_commit = revoke_lo_before_commit,
+        .lo_after_commit = revoke_lo_after_commit,
         .lo_before_scan = revoke_lo_before_scan,
         .lo_scan_elements = revoke_lo_scan_elements,
         .lo_after_scan = revoke_lo_after_scan,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 888a5f5a1a58..cfa327d33194 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -53,6 +53,7 @@ static void gfs2_init_glock_once(void *foo)
         INIT_LIST_HEAD(&gl->gl_lru);
         INIT_LIST_HEAD(&gl->gl_ail_list);
         atomic_set(&gl->gl_ail_count, 0);
+        atomic_set(&gl->gl_revokes, 0);
 }
 
 static void gfs2_init_gl_aspace_once(void *foo)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index b62c8427672c..215c37bfc2a4 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1315,6 +1315,78 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
         return 0;
 }
 
+static void gfs2_final_release_pages(struct gfs2_inode *ip)
+{
+        struct inode *inode = &ip->i_inode;
+        struct gfs2_glock *gl = ip->i_gl;
+
+        truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
+        truncate_inode_pages(&inode->i_data, 0);
+
+        if (atomic_read(&gl->gl_revokes) == 0) {
+                clear_bit(GLF_LFLUSH, &gl->gl_flags);
+                clear_bit(GLF_DIRTY, &gl->gl_flags);
+        }
+}
+
+static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
+{
+        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+        struct gfs2_alloc *al;
+        struct gfs2_rgrpd *rgd;
+        int error;
+
+        if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
+                if (gfs2_consist_inode(ip))
+                        gfs2_dinode_print(ip);
+                return -EIO;
+        }
+
+        al = gfs2_alloc_get(ip);
+        if (!al)
+                return -ENOMEM;
+
+        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+        if (error)
+                goto out;
+
+        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
+        if (error)
+                goto out_qs;
+
+        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
+        if (!rgd) {
+                gfs2_consist_inode(ip);
+                error = -EIO;
+                goto out_rindex_relse;
+        }
+
+        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
+                                   &al->al_rgd_gh);
+        if (error)
+                goto out_rindex_relse;
+
+        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
+        if (error)
+                goto out_rg_gunlock;
+
+        gfs2_free_di(rgd, ip);
+
+        gfs2_final_release_pages(ip);
+
+        gfs2_trans_end(sdp);
+
+out_rg_gunlock:
+        gfs2_glock_dq_uninit(&al->al_rgd_gh);
+out_rindex_relse:
+        gfs2_glock_dq_uninit(&al->al_ri_gh);
+out_qs:
+        gfs2_quota_unhold(ip);
+out:
+        gfs2_alloc_put(ip);
+        return error;
+}
+
 /*
  * We have to (at the moment) hold the inodes main lock to cover
  * the gap between unlocking the shared lock on the iopen lock and
@@ -1378,15 +1450,13 @@ static void gfs2_evict_inode(struct inode *inode)
         }
 
         error = gfs2_dinode_dealloc(ip);
-        if (error)
-                goto out_unlock;
+        goto out_unlock;
 
 out_truncate:
         error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
         if (error)
                 goto out_unlock;
-        /* Needs to be done before glock release & also in a transaction */
-        truncate_inode_pages(&inode->i_data, 0);
+        gfs2_final_release_pages(ip);
         gfs2_trans_end(sdp);
 
 out_unlock: