author     Tejun Heo <tj@kernel.org>    2010-07-20 16:09:02 -0400
committer  Tejun Heo <tj@kernel.org>    2010-07-23 07:14:25 -0400
commit     6ecd7c2dd9f5dd4f6e8f65c8027159f9c73b0e4c (patch)
tree       ba12b0b9ba00e21ff73eee82714be448c5a7c6e9 /fs/gfs2/recovery.c
parent     991ea75cb1df7188d209274b3d51c105b4f18ffe (diff)
gfs2: use workqueue instead of slow-work
Workqueue can now handle high concurrency.  Convert gfs2 to use workqueue
instead of slow-work.

* Steven pointed out that the recovery path may be run from the allocation
  path and thus requires a forward-progress guarantee without memory
  allocation.  Create and use gfs_recovery_wq with a rescuer.  Note that
  forward progress was not guaranteed with slow-work.

* Updated to use a non-reentrant workqueue.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
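The creation of gfs_recovery_wq itself lands outside this file (the same patch
also touches fs/gfs2/ops_fstype.c, which is not part of this diff). As a rough
sketch only: allocating such a queue under the cmwq API this commit targets
would look something like the following. The rescuer flag was spelled
WQ_RESCUER at the time of this commit and is WQ_MEM_RECLAIM on current
kernels; WQ_NON_REENTRANT has since been removed because non-reentrancy became
the default. The function names below are illustrative, not taken from the
patch.

#include <linux/workqueue.h>

struct workqueue_struct *gfs_recovery_wq;

static int gfs2_recovery_wq_init(void)          /* illustrative name */
{
        /* The rescuer flag (WQ_MEM_RECLAIM, formerly WQ_RESCUER) attaches a
         * dedicated rescuer thread, so queued work makes forward progress
         * even when creating a new worker would itself need memory; this is
         * required because recovery can be entered from the allocation path.
         * WQ_NON_REENTRANT keeps one work item from running on two CPUs at
         * once. */
        gfs_recovery_wq = alloc_workqueue("gfs_recovery",
                                          WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 0);
        if (!gfs_recovery_wq)
                return -ENOMEM;
        return 0;
}

static void gfs2_recovery_wq_exit(void)         /* illustrative name */
{
        destroy_workqueue(gfs_recovery_wq);
}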
Diffstat (limited to 'fs/gfs2/recovery.c')
-rw-r--r--  fs/gfs2/recovery.c | 54 +++++++++++++++++++++---------------------------------
 1 file changed, 21 insertions(+), 33 deletions(-)
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 4b9bece3d437..f7f89a94a5a4 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -14,7 +14,6 @@
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
-#include <linux/slow-work.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -28,6 +27,8 @@
 #include "util.h"
 #include "dir.h"
 
+struct workqueue_struct *gfs_recovery_wq;
+
 int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
                           struct buffer_head **bh)
 {
@@ -443,23 +444,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
        kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
 }
 
-static int gfs2_recover_get_ref(struct slow_work *work)
-{
-       struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
-       if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
-               return -EBUSY;
-       return 0;
-}
-
-static void gfs2_recover_put_ref(struct slow_work *work)
-{
-       struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
-       clear_bit(JDF_RECOVERY, &jd->jd_flags);
-       smp_mb__after_clear_bit();
-       wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
-}
-
-static void gfs2_recover_work(struct slow_work *work)
+void gfs2_recover_func(struct work_struct *work)
 {
        struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
@@ -578,7 +563,7 @@ static void gfs2_recover_work(struct slow_work *work)
        gfs2_glock_dq_uninit(&j_gh);
 
        fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
-       return;
+       goto done;
 
 fail_gunlock_tr:
        gfs2_glock_dq_uninit(&t_gh);
@@ -590,32 +575,35 @@ fail_gunlock_j:
        }
 
        fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
-
 fail:
        gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
+done:
+       clear_bit(JDF_RECOVERY, &jd->jd_flags);
+       smp_mb__after_clear_bit();
+       wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
 }
 
-struct slow_work_ops gfs2_recover_ops = {
-       .owner   = THIS_MODULE,
-       .get_ref = gfs2_recover_get_ref,
-       .put_ref = gfs2_recover_put_ref,
-       .execute = gfs2_recover_work,
-};
-
-
 static int gfs2_recovery_wait(void *word)
 {
        schedule();
        return 0;
 }
 
-int gfs2_recover_journal(struct gfs2_jdesc *jd)
+int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
 {
        int rv;
-       rv = slow_work_enqueue(&jd->jd_work);
-       if (rv)
-               return rv;
-       wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, TASK_UNINTERRUPTIBLE);
+
+       if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
+               return -EBUSY;
+
+       /* we have JDF_RECOVERY, queue should always succeed */
+       rv = queue_work(gfs_recovery_wq, &jd->jd_work);
+       BUG_ON(!rv);
+
+       if (wait)
+               wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait,
+                           TASK_UNINTERRUPTIBLE);
+
        return 0;
 }
 
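A note on the synchronization visible in the hunks above: the JDF_RECOVERY bit
now does double duty. test_and_set_bit() in gfs2_recover_journal() takes over
the old get_ref callback's role as the "recovery already queued or running"
guard, and the clear_bit()/wake_up_bit() sequence at the new done: label takes
over put_ref's role as the completion signal that wait_on_bit() sleeps on. A
minimal sketch of that pattern in isolation, using the kernel APIs of this era
(wait_on_bit() still took an action callback, and smp_mb__after_clear_bit() is
today's smp_mb__after_atomic()); all names below are illustrative:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define EX_BUSY         0       /* plays the role of JDF_RECOVERY */

static int ex_wait_action(void *word)
{
        schedule();                     /* sleep until wake_up_bit() */
        return 0;
}

/* Submitter: atomically claim the bit before queueing, so a concurrent
 * caller gets -EBUSY rather than queueing duplicate work. */
static int ex_try_start(unsigned long *flags)
{
        if (test_and_set_bit(EX_BUSY, flags))
                return -EBUSY;
        return 0;                       /* safe to queue_work() now */
}

/* Worker: release the bit and wake waiters; the barrier orders the
 * clear_bit() before the waiters' re-check of the bit. */
static void ex_finish(unsigned long *flags)
{
        clear_bit(EX_BUSY, flags);
        smp_mb__after_clear_bit();
        wake_up_bit(flags, EX_BUSY);
}

/* Waiter: block until the worker clears the bit. */
static void ex_wait_done(unsigned long *flags)
{
        wait_on_bit(flags, EX_BUSY, ex_wait_action, TASK_UNINTERRUPTIBLE);
}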