author     Benjamin Marzinski <bmarzins@redhat.com>   2014-11-13 21:42:04 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>    2014-11-17 05:36:39 -0500
commit     2e60d7683c8d2ea21317f6d9f4cd3bf5428ce162 (patch)
tree       fc7900ad18814d1ea46879f93eab063ff5754d1e /fs/gfs2/log.c
parent     48b6bca6b7b8309697fc8a101793befe92d249d9 (diff)
GFS2: update freeze code to use freeze/thaw_super on all nodes
The current gfs2 freezing code is considerably more complicated than it should be because it doesn't use the vfs freezing code on any node except the one that begins the freeze. This is because it needs to acquire a cluster glock before calling the vfs code to prevent a deadlock, and without the new freeze_super and thaw_super hooks, that was impossible. To deal with the issue, gfs2 had to do some hacky locking tricks to make sure that a frozen node couldn't be holding a lock it needed to do the unfreeze ioctl.

This patch makes use of the new hooks to simplify the gfs2 locking code. Now, all the nodes in the cluster freeze and thaw in exactly the same way. Every node in the cluster caches the freeze glock in the shared state. The new freeze_super hook allows the freezing node to grab this freeze glock in the exclusive state without first calling the vfs freeze_super function. All the nodes in the cluster see this lock change and call the vfs freeze_super function. The vfs locking code guarantees that the nodes can't get stuck holding the glocks necessary to unfreeze the system. To unfreeze, the freezing node uses the new thaw_super hook to drop the freeze glock. Again, all the nodes notice this, reacquire the glock in shared mode and call the vfs thaw_super function.

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
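For orientation, here is a minimal single-process sketch of the cluster freeze/thaw ordering described above. It is not the fs/gfs2 implementation: cluster_lock stands in for the freeze glock, the vfs_freeze_super()/vfs_thaw_super() stubs stand in for the VFS freeze_super/thaw_super calls, and every name is illustrative.

/*
 * Toy model of the cluster-wide freeze/thaw ordering: the freezing node
 * takes the shared cluster lock exclusive *before* calling the VFS freeze
 * path; every other node reacts to the lock change by freezing too, and
 * thaw reverses the sequence.  Build with: cc -o freeze_model freeze_model.c
 */
#include <stdio.h>

enum lock_state { LS_SHARED, LS_EXCLUSIVE };

/* Every node normally caches the freeze lock in the shared state. */
static enum lock_state cluster_lock = LS_SHARED;

static void vfs_freeze_super(const char *node) { printf("%s: freeze_super()\n", node); }
static void vfs_thaw_super(const char *node)   { printf("%s: thaw_super()\n", node); }

/* Freezing node: grab the lock exclusive first, then freeze through the VFS. */
static void begin_cluster_freeze(const char *node)
{
	cluster_lock = LS_EXCLUSIVE;
	vfs_freeze_super(node);
}

/* Any other node that sees the lock change freezes through the VFS as well. */
static void notice_cluster_freeze(const char *node)
{
	if (cluster_lock == LS_EXCLUSIVE)
		vfs_freeze_super(node);
}

/* Thaw: the freezing node drops the lock back to shared and thaws ... */
static void end_cluster_freeze(const char *node)
{
	cluster_lock = LS_SHARED;
	vfs_thaw_super(node);
}

/* ... and every other node reacquires it shared and thaws too. */
static void notice_cluster_thaw(const char *node)
{
	if (cluster_lock == LS_SHARED)
		vfs_thaw_super(node);
}

int main(void)
{
	begin_cluster_freeze("node0");   /* the node running the freeze ioctl */
	notice_cluster_freeze("node1");  /* every other node follows          */
	end_cluster_freeze("node0");
	notice_cluster_thaw("node1");
	return 0;
}

In the real filesystem the "notice" step is driven by glock state-change callbacks rather than by polling a shared variable, but the ordering the model shows, taking the cluster lock before entering the VFS freeze path, is exactly what the new hooks make possible.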
Diffstat (limited to 'fs/gfs2/log.c')
-rw-r--r--  fs/gfs2/log.c  42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 3966fadbcebd..536e7a6252cd 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -339,6 +339,7 @@ void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 {
+	int ret = 0;
 	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
 	unsigned wanted = blks + reserved_blks;
 	DEFINE_WAIT(wait);
@@ -362,9 +363,13 @@ retry:
 		} while(free_blocks <= wanted);
 		finish_wait(&sdp->sd_log_waitq, &wait);
 	}
+	atomic_inc(&sdp->sd_reserving_log);
 	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
-				free_blocks - blks) != free_blocks)
+				free_blocks - blks) != free_blocks) {
+		if (atomic_dec_and_test(&sdp->sd_reserving_log))
+			wake_up(&sdp->sd_reserving_log_wait);
 		goto retry;
+	}
 	trace_gfs2_log_blocks(sdp, -blks);
 
 	/*
@@ -377,9 +382,11 @@ retry:
 	down_read(&sdp->sd_log_flush_lock);
 	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
 		gfs2_log_release(sdp, blks);
-		return -EROFS;
+		ret = -EROFS;
 	}
-	return 0;
+	if (atomic_dec_and_test(&sdp->sd_reserving_log))
+		wake_up(&sdp->sd_reserving_log_wait);
+	return ret;
 }
 
 /**
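The gfs2_log_reserve() hunks above add an in-flight counter: each reservation bumps sd_reserving_log before the cmpxchg on sd_log_blks_free and drops it again on both the retry and return paths, waking sd_reserving_log_wait when the count reaches zero, so code elsewhere in the patch (outside log.c) can wait until no reservation is in flight. Below is a minimal userspace model of the same counter-plus-wake pattern; the pthread mutex/condvar pair stands in for the kernel's atomics and wait queue, and all names are illustrative, not kernel code.

/*
 * Toy model of "count reservations in flight, wake a waiter when the count
 * drops to zero".  Build with: cc -o reserve_model reserve_model.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int reserving;		/* plays the role of sd_reserving_log */

/* Start of a reservation: models atomic_inc(&sdp->sd_reserving_log). */
static void reserve_enter(void)
{
	pthread_mutex_lock(&lock);
	reserving++;
	pthread_mutex_unlock(&lock);
}

/* End of a reservation: models atomic_dec_and_test() followed by wake_up(). */
static void reserve_exit(void)
{
	pthread_mutex_lock(&lock);
	if (--reserving == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

/* A waiter sleeps until no reservation is in flight. */
static void wait_for_reservations(void)
{
	pthread_mutex_lock(&lock);
	while (reserving != 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reserve_enter();	/* a reservation starts ... */
	reserve_exit();		/* ... and is settled       */
	wait_for_reservations();
	printf("no log reservations in flight\n");
	return 0;
}

In the kernel the same shape is presumably completed by a wait on sd_reserving_log_wait elsewhere in this patch; the wake_up() calls added above are its other half.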
@@ -652,9 +659,12 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	u32 hash;
 	int rw = WRITE_FLUSH_FUA | REQ_META;
 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 	lh = page_address(page);
 	clear_page(lh);
 
+	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+
 	tail = current_tail(sdp);
 
 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
@@ -695,6 +705,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 		    enum gfs2_flush_type type)
 {
 	struct gfs2_trans *tr;
+	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 
 	down_write(&sdp->sd_log_flush_lock);
 
@@ -713,8 +724,12 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 		INIT_LIST_HEAD(&tr->tr_ail1_list);
 		INIT_LIST_HEAD(&tr->tr_ail2_list);
 		tr->tr_first = sdp->sd_log_flush_head;
+		if (unlikely (state == SFS_FROZEN))
+			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
 	}
 
+	if (unlikely(state == SFS_FROZEN))
+		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 	gfs2_assert_withdraw(sdp,
 			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
 
@@ -745,8 +760,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 
-	if (atomic_read(&sdp->sd_log_freeze))
-		type = FREEZE_FLUSH;
 	if (type != NORMAL_FLUSH) {
 		if (!sdp->sd_log_idle) {
 			for (;;) {
@@ -763,21 +776,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 		}
 		if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
 			gfs2_log_shutdown(sdp);
-		if (type == FREEZE_FLUSH) {
-			int error;
-
-			atomic_set(&sdp->sd_log_freeze, 0);
-			wake_up(&sdp->sd_log_frozen_wait);
-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-						   LM_ST_SHARED, 0,
-						   &sdp->sd_thaw_gh);
-			if (error) {
-				printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error);
-				gfs2_assert_withdraw(sdp, 0);
-			}
-			else
-				gfs2_glock_dq_uninit(&sdp->sd_thaw_gh);
-		}
+		if (type == FREEZE_FLUSH)
+			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 	}
 
 	trace_gfs2_log_flush(sdp, 0);
@@ -888,7 +888,7 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
 {
-	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1) || atomic_read(&sdp->sd_log_freeze));
+	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
 }
 
 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)