author		David Teigland <teigland@redhat.com>	2013-03-05 16:01:47 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2013-04-04 04:52:14 -0400
commit		57c7310b8eb96b0fe3b0aaa8dc194adbae03bef3
tree		47d9cebf7d567debd012e3869e522166b5d9e5a8	/fs/gfs2
parent		66ade474237745a57b7e87da9a93c7ec69fd52bb
GFS2: use kmalloc for lvb bitmap
The temp lvb bitmap was on the stack, which could
be an alignment problem for __set_bit_le. Use
kmalloc for it instead.
Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2')
 fs/gfs2/incore.h   |  1 +
 fs/gfs2/lock_dlm.c | 31 ++++++++++++++++++-------------
 2 files changed, 19 insertions(+), 13 deletions(-)
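Background note (not part of the patch): the little-endian bitops ultimately dereference the bitmap as unsigned long words, so the buffer they operate on should be at least long-aligned. A char array on the stack only guarantees byte alignment, whereas kmalloc()/kzalloc() return storage aligned for any built-in type. A minimal sketch of the pattern the patch moves to; the helper name alloc_lvb_bits is made up purely for illustration:

/* Illustrative only -- not GFS2 code.  Shows why the bitmap is
 * kzalloc()ed rather than declared as a char[] on the stack. */
#include <linux/bitops.h>
#include <linux/slab.h>

static char *alloc_lvb_bits(size_t size)	/* hypothetical helper */
{
	/*
	 * kzalloc() returns zeroed memory suitably aligned for any
	 * built-in type, so __set_bit_le()/test_bit_le() can safely
	 * treat it as an array of unsigned long words.
	 */
	char *bits = kzalloc(size, GFP_NOFS);

	if (bits)
		__set_bit_le(0, bits);	/* word-sized access, now aligned */
	return bits;
}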
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 156e42ec84ea..5c29216e9cc1 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -588,6 +588,7 @@ struct lm_lockstruct {
 	struct dlm_lksb ls_control_lksb; /* control_lock */
 	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
 	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
+	char *ls_lvb_bits;
 
 	spinlock_t ls_recover_spin; /* protects following fields */
 	unsigned long ls_recover_flags;	/* DFL_ */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 9802de0f85e6..b15bb45911ca 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -580,7 +580,6 @@ static void gfs2_control_func(struct work_struct *work)
 {
 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t block_gen, start_gen, lvb_gen, flags;
 	int recover_set = 0;
 	int write_lvb = 0;
@@ -634,7 +633,7 @@ static void gfs2_control_func(struct work_struct *work)
 		return;
 	}
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	spin_lock(&ls->ls_recover_spin);
 	if (block_gen != ls->ls_recover_block ||
@@ -664,10 +663,10 @@ static void gfs2_control_func(struct work_struct *work)
 
 			ls->ls_recover_result[i] = 0;
 
-			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
+			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
 				continue;
 
-			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 			write_lvb = 1;
 		}
 	}
@@ -691,7 +690,7 @@ static void gfs2_control_func(struct work_struct *work)
 			continue;
 		if (ls->ls_recover_submit[i] < start_gen) {
 			ls->ls_recover_submit[i] = 0;
-			__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+			__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 		}
 	}
 	/* even if there are no bits to set, we need to write the
@@ -705,7 +704,7 @@ static void gfs2_control_func(struct work_struct *work)
 	spin_unlock(&ls->ls_recover_spin);
 
 	if (write_lvb) {
-		control_lvb_write(ls, start_gen, lvb_bits);
+		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
 	} else {
 		flags = DLM_LKF_CONVERT;
@@ -725,7 +724,7 @@ static void gfs2_control_func(struct work_struct *work)
 	 */
 
 	for (i = 0; i < recover_size; i++) {
-		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
+		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
 			fs_info(sdp, "recover generation %u jid %d\n",
 				start_gen, i);
 			gfs2_recover_set(sdp, i);
@@ -758,7 +757,6 @@ static void gfs2_control_func(struct work_struct *work)
 static int control_mount(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
 	int mounted_mode;
 	int retries = 0;
@@ -857,7 +855,7 @@ locks_done:
 	 * lvb_gen will be non-zero.
 	 */
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	if (lvb_gen == 0xFFFFFFFF) {
 		/* special value to force mount attempts to fail */
@@ -887,7 +885,7 @@ locks_done:
 	 * and all lvb bits to be clear (no pending journal recoveries.)
 	 */
 
-	if (!all_jid_bits_clear(lvb_bits)) {
+	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
 		/* journals need recovery, wait until all are clear */
 		fs_info(sdp, "control_mount wait for journal recovery\n");
 		goto restart;
@@ -949,7 +947,6 @@ static int dlm_recovery_wait(void *word)
 static int control_first_done(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen;
 	int error;
 
@@ -991,8 +988,8 @@ restart:
 	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
 	spin_unlock(&ls->ls_recover_spin);
 
-	memset(lvb_bits, 0, sizeof(lvb_bits));
-	control_lvb_write(ls, start_gen, lvb_bits);
+	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
+	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 
 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
 	if (error)
@@ -1022,6 +1019,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 	uint32_t old_size, new_size;
 	int i, max_jid;
 
+	if (!ls->ls_lvb_bits) {
+		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
+		if (!ls->ls_lvb_bits)
+			return -ENOMEM;
+	}
+
 	max_jid = 0;
 	for (i = 0; i < num_slots; i++) {
 		if (max_jid < slots[i].slot - 1)
@@ -1057,6 +1060,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
 static void free_recover_size(struct lm_lockstruct *ls)
 {
+	kfree(ls->ls_lvb_bits);
 	kfree(ls->ls_recover_submit);
 	kfree(ls->ls_recover_result);
 	ls->ls_recover_submit = NULL;
@@ -1205,6 +1209,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 	ls->ls_recover_size = 0;
 	ls->ls_recover_submit = NULL;
 	ls->ls_recover_result = NULL;
+	ls->ls_lvb_bits = NULL;
 
 	error = set_recover_size(sdp, NULL, 0);
 	if (error)