author		Steven Whitehouse <swhiteho@redhat.com>	2008-11-18 08:38:48 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2009-01-05 02:39:06 -0500
commit		813e0c46c9e2a0c6f0b6e774faac82afd7a2e812 (patch)
tree		cb09aa118f9e053f02e17f7c5ff11139e8e22244 /fs/gfs2
parent		37b2c8377c98acb60cf4d0126e385ef2153bded9 (diff)
GFS2: Fix "truncate in progress" hang
Following on from the recent clean up of gfs2_quotad, this patch moves the processing of "truncate in progress" inodes from the glock workqueue into gfs2_quotad. This fixes a hang due to the "truncate in progress" processing requiring glocks in order to complete.

It might seem odd to use gfs2_quotad for this particular item, but we have to use a pre-existing thread since creating a thread implies a GFP_KERNEL memory allocation which is not allowed from the glock workqueue context. Of the existing threads, gfs2_logd and gfs2_recoverd may deadlock if used for this operation. gfs2_scand and gfs2_glockd are both scheduled for removal at some (hopefully not too distant) future point. That leaves only gfs2_quotad, whose workload is generally fairly light and is easily adapted for this extra task.

Also, as a result of this change, it opens the way for a future patch to make the reading of the inode's information asynchronous with respect to the glock workqueue, which is another improvement that has been on the list for some time now.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
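The patch boils down to a simple hand-off: the glock side, which must not block or allocate with GFP_KERNEL, puts the affected inode on a spinlock-protected list and wakes the quota daemon's wait queue; gfs2_quotad later drains that list and resumes the truncate from a context where taking glocks is safe. The snippet below is a minimal userspace sketch of that producer/consumer pattern, with pthreads standing in for the kernel spinlock and wait queue; all names here (work_item, defer_truncate, daemon_thread) are illustrative, not GFS2 symbols.

/* Build with: cc sketch.c -o sketch -pthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;                 /* stands in for an inode needing truncate resume */
	struct work_item *next;
};

static struct work_item *trunc_list;   /* analogue of sd_trunc_list */
static pthread_mutex_t trunc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t trunc_wait = PTHREAD_COND_INITIALIZER;
static int stopping;

/* Producer: called from the side that must not block (cf. inode_go_lock). */
static void defer_truncate(int id)
{
	struct work_item *w = malloc(sizeof(*w));
	if (!w)
		return;
	w->id = id;
	pthread_mutex_lock(&trunc_lock);
	w->next = trunc_list;
	trunc_list = w;
	pthread_mutex_unlock(&trunc_lock);
	pthread_cond_signal(&trunc_wait);       /* cf. wake_up(&sd_quota_wait) */
}

/* Consumer loop (cf. gfs2_quotad): drain the list, then sleep until woken. */
static void *daemon_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&trunc_lock);
	for (;;) {
		while (trunc_list) {
			struct work_item *w = trunc_list;
			trunc_list = w->next;
			pthread_mutex_unlock(&trunc_lock);
			printf("resuming truncate for item %d\n", w->id);
			free(w);
			pthread_mutex_lock(&trunc_lock);
		}
		if (stopping)
			break;
		pthread_cond_wait(&trunc_wait, &trunc_lock);
	}
	pthread_mutex_unlock(&trunc_lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, daemon_thread, NULL);
	defer_truncate(1);
	defer_truncate(2);

	/* Ask the worker to finish; it drains any queued items before exiting. */
	pthread_mutex_lock(&trunc_lock);
	stopping = 1;
	pthread_mutex_unlock(&trunc_lock);
	pthread_cond_signal(&trunc_wait);
	pthread_join(tid, NULL);
	return 0;
}

The same shape is visible in the diff below: inode_go_lock queues the inode and returns early, quotad_check_trunc_list drains the list, and gfs2_quotad only sleeps when the list is empty.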
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c		33
-rw-r--r--	fs/gfs2/glock.h		1
-rw-r--r--	fs/gfs2/glops.c		11
-rw-r--r--	fs/gfs2/incore.h	3
-rw-r--r--	fs/gfs2/main.c		1
-rw-r--r--	fs/gfs2/ops_fstype.c	2
-rw-r--r--	fs/gfs2/quota.c		31
7 files changed, 75 insertions, 7 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 27cb9cca9c08..4ddf3bd55dda 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -40,6 +40,7 @@
40#include "quota.h" 40#include "quota.h"
41#include "super.h" 41#include "super.h"
42#include "util.h" 42#include "util.h"
43#include "bmap.h"
43 44
44struct gfs2_gl_hash_bucket { 45struct gfs2_gl_hash_bucket {
45 struct hlist_head hb_list; 46 struct hlist_head hb_list;
@@ -289,7 +290,8 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
- * Returns: true if there is a blocked holder at the head of the list
+ * Returns: 1 if there is a blocked holder at the head of the list, or 2
+ *          if a type specific operation is underway.
  */
 
 static int do_promote(struct gfs2_glock *gl)
@@ -312,6 +314,8 @@ restart:
 				ret = glops->go_lock(gh);
 				spin_lock(&gl->gl_spin);
 				if (ret) {
+					if (ret == 1)
+						return 2;
 					gh->gh_error = ret;
 					list_del_init(&gh->gh_list);
 					gfs2_holder_wake(gh);
@@ -416,6 +420,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh;
 	unsigned state = ret & LM_OUT_ST_MASK;
+	int rv;
 
 	spin_lock(&gl->gl_spin);
 	state_change(gl, state);
@@ -470,7 +475,6 @@ retry:
 		gfs2_demote_wake(gl);
 	if (state != LM_ST_UNLOCKED) {
 		if (glops->go_xmote_bh) {
-			int rv;
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
 			if (rv == -EAGAIN)
@@ -481,10 +485,13 @@ retry:
 			goto out;
 		}
 	}
-	do_promote(gl);
+	rv = do_promote(gl);
+	if (rv == 2)
+		goto out_locked;
 	}
out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
+out_locked:
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_put(gl);
 }
@@ -584,6 +591,7 @@ __releases(&gl->gl_spin)
 __acquires(&gl->gl_spin)
 {
 	struct gfs2_holder *gh = NULL;
+	int ret;
 
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 		return;
@@ -602,8 +610,11 @@ __acquires(&gl->gl_spin)
 	} else {
 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			gfs2_demote_wake(gl);
-		if (do_promote(gl) == 0)
+		ret = do_promote(gl);
+		if (ret == 0)
 			goto out;
+		if (ret == 2)
+			return;
 		gh = find_first_waiter(gl);
 		gl->gl_target = gh->gh_state;
 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
@@ -1556,6 +1567,20 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	}
 }
 
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
+{
+	struct gfs2_glock *gl = ip->i_gl;
+	int ret;
+
+	ret = gfs2_truncatei_resume(ip);
+	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
+
+	spin_lock(&gl->gl_spin);
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	run_queue(gl, 1);
+	spin_unlock(&gl->gl_spin);
+}
+
 static const char *state2str(unsigned state)
 {
 	switch(state) {
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 695c6b193611..13a64ee6523b 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -132,6 +132,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 
 int __init gfs2_glock_init(void);
 void gfs2_glock_exit(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 68ee66552d19..8ebff8ebae20 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -227,6 +227,7 @@ static int inode_go_demote_ok(struct gfs2_glock *gl)
 static int inode_go_lock(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = gl->gl_object;
 	int error = 0;
 
@@ -241,8 +242,14 @@ static int inode_go_lock(struct gfs2_holder *gh)
 
 	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
-	    (gh->gh_state == LM_ST_EXCLUSIVE))
-		error = gfs2_truncatei_resume(ip);
+	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
+		spin_lock(&sdp->sd_trunc_lock);
+		if (list_empty(&ip->i_trunc_list))
+			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
+		spin_unlock(&sdp->sd_trunc_lock);
+		wake_up(&sdp->sd_quota_wait);
+		return 1;
+	}
 
 	return error;
 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index cfebc1793574..dd7d0f8f3575 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -244,6 +244,7 @@ struct gfs2_inode {
 	struct gfs2_alloc *i_alloc;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
+	struct list_head i_trunc_list;
 	u32 i_entries;
 	u32 i_diskflags;
 	u8 i_height;
@@ -550,6 +551,8 @@ struct gfs2_sbd {
 	spinlock_t sd_quota_spin;
 	struct mutex sd_quota_mutex;
 	wait_queue_head_t sd_quota_wait;
+	struct list_head sd_trunc_list;
+	spinlock_t sd_trunc_lock;
 
 	unsigned int sd_quota_slots;
 	unsigned int sd_quota_chunks;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e3f6f1844a21..cf39295ccb90 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -30,6 +30,7 @@ static void gfs2_init_inode_once(void *foo)
 
 	inode_init_once(&ip->i_inode);
 	init_rwsem(&ip->i_rw_mutex);
+	INIT_LIST_HEAD(&ip->i_trunc_list);
 	ip->i_alloc = NULL;
 }
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 5d137063b679..a9a83804eea7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -107,6 +107,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	spin_lock_init(&sdp->sd_quota_spin);
 	mutex_init(&sdp->sd_quota_mutex);
 	init_waitqueue_head(&sdp->sd_quota_wait);
+	INIT_LIST_HEAD(&sdp->sd_trunc_list);
+	spin_lock_init(&sdp->sd_trunc_lock);
 
 	spin_lock_init(&sdp->sd_log_lock);
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0cfe44f0b6ab..b08d09696b3e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1296,6 +1296,25 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
 	}
 }
 
+static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
+{
+	struct gfs2_inode *ip;
+
+	while(1) {
+		ip = NULL;
+		spin_lock(&sdp->sd_trunc_lock);
+		if (!list_empty(&sdp->sd_trunc_list)) {
+			ip = list_entry(sdp->sd_trunc_list.next,
+					struct gfs2_inode, i_trunc_list);
+			list_del_init(&ip->i_trunc_list);
+		}
+		spin_unlock(&sdp->sd_trunc_lock);
+		if (ip == NULL)
+			return;
+		gfs2_glock_finish_truncate(ip);
+	}
+}
+
 /**
  * gfs2_quotad - Write cached quota changes into the quota file
  * @sdp: Pointer to GFS2 superblock
@@ -1310,6 +1329,7 @@ int gfs2_quotad(void *data)
 	unsigned long quotad_timeo = 0;
 	unsigned long t = 0;
 	DEFINE_WAIT(wait);
+	int empty;
 
 	while (!kthread_should_stop()) {
 
@@ -1324,12 +1344,21 @@ int gfs2_quotad(void *data)
 		/* FIXME: This should be turned into a shrinker */
 		gfs2_quota_scan(sdp);
 
+		/* Check for & recover partially truncated inodes */
+		quotad_check_trunc_list(sdp);
+
 		if (freezing(current))
 			refrigerator();
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE);
-		t -= schedule_timeout(t);
+		spin_lock(&sdp->sd_trunc_lock);
+		empty = list_empty(&sdp->sd_trunc_list);
+		spin_unlock(&sdp->sd_trunc_lock);
+		if (empty)
+			t -= schedule_timeout(t);
+		else
+			t = 0;
 		finish_wait(&sdp->sd_quota_wait, &wait);
 	}
 