Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
 -rw-r--r--   drivers/block/drbd/drbd_main.c | 251
 1 files changed, 211 insertions, 40 deletions
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 298b868910dc..a5dca6affcbb 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -45,7 +45,7 @@
 #include <linux/reboot.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
-
+#include <linux/workqueue.h>
 #define __KERNEL_SYSCALLS__
 #include <linux/unistd.h>
 #include <linux/vmalloc.h>
@@ -2299,6 +2299,7 @@ static void drbd_cleanup(void)
         idr_for_each_entry(&minors, mdev, i) {
                 idr_remove(&minors, mdev_to_minor(mdev));
                 idr_remove(&mdev->tconn->volumes, mdev->vnr);
+                destroy_workqueue(mdev->submit.wq);
                 del_gendisk(mdev->vdisk);
                 /* synchronize_rcu(); No other threads running at this point */
                 kref_put(&mdev->kref, &drbd_minor_destroy);
@@ -2588,6 +2589,21 @@ void conn_destroy(struct kref *kref)
         kfree(tconn);
 }
 
+int init_submitter(struct drbd_conf *mdev)
+{
+        /* opencoded create_singlethread_workqueue(),
+         * to be able to say "drbd%d", ..., minor */
+        mdev->submit.wq = alloc_workqueue("drbd%u_submit",
+                        WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
+        if (!mdev->submit.wq)
+                return -ENOMEM;
+
+        INIT_WORK(&mdev->submit.worker, do_submit);
+        spin_lock_init(&mdev->submit.lock);
+        INIT_LIST_HEAD(&mdev->submit.writes);
+        return 0;
+}
+
 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
 {
         struct drbd_conf *mdev;
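The init_submitter() helper added in the hunk above sets up a per-device submit context: alloc_workqueue() with WQ_UNBOUND | WQ_MEM_RECLAIM and a max_active of 1 is, as its comment says, an open-coded create_singlethread_workqueue(), used only so the minor number can be formatted into the workqueue name. The fields it initializes form a simple producer/consumer pair: callers append to submit.writes under submit.lock and kick submit.worker on submit.wq, while do_submit() (referenced here but not part of the hunks shown in this file) drains the list. A minimal sketch of that pattern, with hypothetical example_* names rather than code taken from the patch:

/* Illustrative sketch only; not code from this patch. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_submit {
        struct workqueue_struct *wq;
        struct work_struct worker;
        spinlock_t lock;
        struct list_head writes;
};

/* producer side: queue one request and kick the single-threaded worker */
static void example_queue_write(struct example_submit *s, struct list_head *req)
{
        spin_lock(&s->lock);
        list_add_tail(req, &s->writes);
        spin_unlock(&s->lock);
        queue_work(s->wq, &s->worker);
}

/* consumer side: grab the whole list at once, then process it */
static void example_do_submit(struct work_struct *w)
{
        struct example_submit *s = container_of(w, struct example_submit, worker);
        LIST_HEAD(incoming);

        spin_lock(&s->lock);
        list_splice_init(&s->writes, &incoming);
        spin_unlock(&s->lock);

        /* ... submit each entry on "incoming" to the lower device ... */
}

WQ_MEM_RECLAIM matters for such a workqueue because it sits in the block I/O submission path and must be able to make forward progress under memory pressure.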
@@ -2677,6 +2693,12 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
                 goto out_idr_remove_minor;
         }
 
+        if (init_submitter(mdev)) {
+                err = ERR_NOMEM;
+                drbd_msg_put_info("unable to create submit workqueue");
+                goto out_idr_remove_vol;
+        }
+
         add_disk(disk);
         kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
 
@@ -2687,6 +2709,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 
         return NO_ERROR;
 
+out_idr_remove_vol:
+        idr_remove(&tconn->volumes, vnr_got);
 out_idr_remove_minor:
         idr_remove(&minors, minor_got);
         synchronize_rcu();
@@ -2794,6 +2818,7 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
+        kfree(ldev->disk_conf);
         kfree(ldev);
 }
 
@@ -2833,8 +2858,9 @@ void conn_md_sync(struct drbd_tconn *tconn)
         rcu_read_unlock();
 }
 
+/* aligned 4kByte */
 struct meta_data_on_disk {
-        u64 la_size;           /* last agreed size. */
+        u64 la_size_sect;      /* last agreed size. */
         u64 uuid[UI_SIZE];     /* UUIDs. */
         u64 device_uuid;
         u64 reserved_u64_1;
@@ -2842,13 +2868,17 @@ struct meta_data_on_disk {
         u32 magic;
         u32 md_size_sect;
         u32 al_offset;         /* offset to this block */
-        u32 al_nr_extents;     /* important for restoring the AL */
+        u32 al_nr_extents;     /* important for restoring the AL (userspace) */
               /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
         u32 bm_offset;         /* offset to the bitmap, from here */
         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
         u32 la_peer_max_bio_size;   /* last peer max_bio_size */
-        u32 reserved_u32[3];
 
+        /* see al_tr_number_to_on_disk_sector() */
+        u32 al_stripes;
+        u32 al_stripe_size_4k;
+
+        u8 reserved_u8[4096 - (7*8 + 10*4)];
 } __packed;
 
 /**
@@ -2861,6 +2891,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
         sector_t sector;
         int i;
 
+        /* Don't accidentally change the DRBD meta data layout. */
+        BUILD_BUG_ON(UI_SIZE != 4);
+        BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
         del_timer(&mdev->md_sync_timer);
         /* timer may be rearmed by drbd_md_mark_dirty() now. */
         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
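The two BUILD_BUG_ON() lines added above pin the on-disk layout: with UI_SIZE == 4, struct meta_data_on_disk carries 7 u64 members and 10 u32 members, which is exactly the 7*8 + 10*4 = 96 bytes subtracted from 4096 in the new reserved_u8[] member, so the superblock stays exactly one 4 KiB block. A standalone sketch of the same size arithmetic (userspace C11 with made-up example names, not the driver's own struct):

/* Illustrative sketch only: the padding arithmetic behind
 * BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096). */
#include <stdint.h>

struct example_md_on_disk {
        uint64_t u64_fields[7];    /* la_size_sect, uuid[4], device_uuid, reserved */
        uint32_t u32_fields[10];   /* magic, offsets, sizes, al_stripes, ... */
        uint8_t  reserved_u8[4096 - (7*8 + 10*4)];  /* 4096 - 96 = 4000 padding bytes */
} __attribute__((packed));

_Static_assert(sizeof(struct example_md_on_disk) == 4096,
               "the on-disk superblock must remain exactly 4 KiB");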
@@ -2875,9 +2909,9 @@ void drbd_md_sync(struct drbd_conf *mdev)
         if (!buffer)
                 goto out;
 
-        memset(buffer, 0, 512);
+        memset(buffer, 0, sizeof(*buffer));
 
-        buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+        buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
         for (i = UI_CURRENT; i < UI_SIZE; i++)
                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
@@ -2892,7 +2926,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
         buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
 
-        D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
+        buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
+        buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+
+        D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
         sector = mdev->ldev->md.md_offset;
 
         if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
@@ -2910,13 +2947,141 @@ out:
         put_ldev(mdev);
 }
 
+static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+                struct meta_data_on_disk *on_disk,
+                struct drbd_md *in_core)
+{
+        u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
+        u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
+        u64 al_size_4k;
+
+        /* both not set: default to old fixed size activity log */
+        if (al_stripes == 0 && al_stripe_size_4k == 0) {
+                al_stripes = 1;
+                al_stripe_size_4k = MD_32kB_SECT/8;
+        }
+
+        /* some paranoia plausibility checks */
+
+        /* we need both values to be set */
+        if (al_stripes == 0 || al_stripe_size_4k == 0)
+                goto err;
+
+        al_size_4k = (u64)al_stripes * al_stripe_size_4k;
+
+        /* Upper limit of activity log area, to avoid potential overflow
+         * problems in al_tr_number_to_on_disk_sector(). As right now, more
+         * than 72 * 4k blocks total only increases the amount of history,
+         * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
+        if (al_size_4k > (16 * 1024 * 1024/4))
+                goto err;
+
+        /* Lower limit: we need at least 8 transaction slots (32kB)
+         * to not break existing setups */
+        if (al_size_4k < MD_32kB_SECT/8)
+                goto err;
+
+        in_core->al_stripe_size_4k = al_stripe_size_4k;
+        in_core->al_stripes = al_stripes;
+        in_core->al_size_4k = al_size_4k;
+
+        return 0;
+err:
+        dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+                        al_stripes, al_stripe_size_4k);
+        return -EINVAL;
+}
+
+static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+        sector_t capacity = drbd_get_capacity(bdev->md_bdev);
+        struct drbd_md *in_core = &bdev->md;
+        s32 on_disk_al_sect;
+        s32 on_disk_bm_sect;
+
+        /* The on-disk size of the activity log, calculated from offsets, and
+         * the size of the activity log calculated from the stripe settings,
+         * should match.
+         * Though we could relax this a bit: it is ok, if the striped activity log
+         * fits in the available on-disk activity log size.
+         * Right now, that would break how resize is implemented.
+         * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
+         * of possible unused padding space in the on disk layout. */
+        if (in_core->al_offset < 0) {
+                if (in_core->bm_offset > in_core->al_offset)
+                        goto err;
+                on_disk_al_sect = -in_core->al_offset;
+                on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
+        } else {
+                if (in_core->al_offset != MD_4kB_SECT)
+                        goto err;
+                if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
+                        goto err;
+
+                on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
+                on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
+        }
+
+        /* old fixed size meta data is exactly that: fixed. */
+        if (in_core->meta_dev_idx >= 0) {
+                if (in_core->md_size_sect != MD_128MB_SECT
+                ||  in_core->al_offset != MD_4kB_SECT
+                ||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
+                ||  in_core->al_stripes != 1
+                ||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
+                        goto err;
+        }
+
+        if (capacity < in_core->md_size_sect)
+                goto err;
+        if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
+                goto err;
+
+        /* should be aligned, and at least 32k */
+        if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
+                goto err;
+
+        /* should fit (for now: exactly) into the available on-disk space;
+         * overflow prevention is in check_activity_log_stripe_size() above. */
+        if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
+                goto err;
+
+        /* again, should be aligned */
+        if (in_core->bm_offset & 7)
+                goto err;
+
+        /* FIXME check for device grow with flex external meta data? */
+
+        /* can the available bitmap space cover the last agreed device size? */
+        if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
+                goto err;
+
+        return 0;
+
+err:
+        dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+                        "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
+                        "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
+                        in_core->meta_dev_idx,
+                        in_core->al_stripes, in_core->al_stripe_size_4k,
+                        in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
+                        (unsigned long long)in_core->la_size_sect,
+                        (unsigned long long)capacity);
+
+        return -EINVAL;
+}
+
+
 /**
  * drbd_md_read() - Reads in the meta data super block
  * @mdev: DRBD device.
  * @bdev: Device from which the meta data should be read in.
  *
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
+ * Return NO_ERROR on success, and an enum drbd_ret_code in case
  * something goes wrong.
+ *
+ * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
+ * even before @bdev is assigned to @mdev->ldev.
  */
 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 {
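Two of the bounds in the helpers above become clearer with the arithmetic written out. In check_activity_log_stripe_size(), the upper bound of 16*1024*1024/4 is 4 Mi 4-KiB blocks, i.e. the 16 GB mentioned in the comment. In check_offsets_and_sizes(), the final test divides la_size_sect by MD_4kB_SECT*8*512 = 32768: one bit covers a 4 KiB block (8 sectors), one byte covers 8 blocks, so one 512-byte bitmap sector covers 32768 device sectors. The sketch below redoes that coverage calculation in userspace C as an illustration (it rounds up at every step, while the driver's expression divides straight through after rounding the device size up to 4 KiB blocks); MD_4kB_SECT = 8 is an assumed value, not taken from this diff:

/* Illustrative sketch only: 512-byte bitmap sectors needed to cover a device
 * of la_size_sect 512-byte sectors at one bit per 4 KiB block. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MD_4kB_SECT 8   /* 512-byte sectors per 4 KiB block (assumption) */

static uint64_t bitmap_sectors_needed(uint64_t la_size_sect)
{
        uint64_t blocks_4k = (la_size_sect + EXAMPLE_MD_4kB_SECT - 1) / EXAMPLE_MD_4kB_SECT;
        uint64_t bitmap_bytes = (blocks_4k + 7) / 8;   /* one bit per block */
        return (bitmap_bytes + 511) / 512;             /* one bitmap sector = 512 bytes */
}

int main(void)
{
        /* 1 TiB backing device: 2^31 sectors -> 2^28 blocks -> 32 MiB bitmap -> 65536 sectors */
        printf("%llu\n", (unsigned long long)bitmap_sectors_needed(1ULL << 31));
        return 0;
}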
@@ -2924,12 +3089,17 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
         u32 magic, flags;
         int i, rv = NO_ERROR;
 
-        if (!get_ldev_if_state(mdev, D_ATTACHING))
-                return ERR_IO_MD_DISK;
+        if (mdev->state.disk != D_DISKLESS)
+                return ERR_DISK_CONFIGURED;
 
         buffer = drbd_md_get_buffer(mdev);
         if (!buffer)
-                goto out;
+                return ERR_NOMEM;
+
+        /* First, figure out where our meta data superblock is located,
+         * and read it. */
+        bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
+        bdev->md.md_offset = drbd_md_ss(bdev);
 
         if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                 /* NOTE: can't do normal error processing here as this is
@@ -2948,45 +3118,51 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                 rv = ERR_MD_UNCLEAN;
                 goto err;
         }
+
+        rv = ERR_MD_INVALID;
         if (magic != DRBD_MD_MAGIC_08) {
                 if (magic == DRBD_MD_MAGIC_07)
                         dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
                 else
                         dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
-                rv = ERR_MD_INVALID;
                 goto err;
         }
-        if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
-                dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
-                        be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
-                rv = ERR_MD_INVALID;
+
+        if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
+                dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+                        be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
                 goto err;
         }
+
+
+        /* convert to in_core endian */
+        bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
+        for (i = UI_CURRENT; i < UI_SIZE; i++)
+                bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
+        bdev->md.flags = be32_to_cpu(buffer->flags);
+        bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+
+        bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
+        bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
+        bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
+
+        if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+                goto err;
+        if (check_offsets_and_sizes(mdev, bdev))
+                goto err;
+
         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
                         be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
-                rv = ERR_MD_INVALID;
                 goto err;
         }
         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
                         be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
-                rv = ERR_MD_INVALID;
                 goto err;
         }
 
-        if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
-                dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
-                        be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
-                rv = ERR_MD_INVALID;
-                goto err;
-        }
-
-        bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
-        for (i = UI_CURRENT; i < UI_SIZE; i++)
-                bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
-        bdev->md.flags = be32_to_cpu(buffer->flags);
-        bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
+        rv = NO_ERROR;
 
         spin_lock_irq(&mdev->tconn->req_lock);
         if (mdev->state.conn < C_CONNECTED) {
@@ -2999,8 +3175,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
 err:
         drbd_md_put_buffer(mdev);
-out:
-        put_ldev(mdev);
 
         return rv;
 }
@@ -3238,8 +3412,12 @@ static int w_go_diskless(struct drbd_work *w, int unused)
          * end up here after a failed attach, before ldev was even assigned.
          */
         if (mdev->bitmap && mdev->ldev) {
+                /* An interrupted resync or similar is allowed to recounts bits
+                 * while we detach.
+                 * Any modifications would not be expected anymore, though.
+                 */
                 if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
-                                        "detach", BM_LOCKED_MASK)) {
+                                        "detach", BM_LOCKED_TEST_ALLOWED)) {
                         if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
                                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                                 drbd_md_sync(mdev);
@@ -3251,13 +3429,6 @@ static int w_go_diskless(struct drbd_work *w, int unused)
         return 0;
 }
 
-void drbd_go_diskless(struct drbd_conf *mdev)
-{
-        D_ASSERT(mdev->state.disk == D_FAILED);
-        if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
-                drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
-}
-
 /**
  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
  * @mdev: DRBD device.