about summary refs log tree commit diff stats
path: root/drivers/block/drbd
diff options
context:
space:
mode:
authorPhilipp Reisner <philipp.reisner@linbit.com>2010-05-17 10:10:43 -0400
committerPhilipp Reisner <philipp.reisner@linbit.com>2010-05-17 20:03:49 -0400
commit0c3f34516e8c5a1a0ba3585a7777d32bbbdf4ecb (patch)
treebb61a6235de8a6a87ebc4d4f39630d17e3c9a3de /drivers/block/drbd
parent9a25a04c8079725c1b1ab756694a8e0757844b40 (diff)
drbd: Create new current UUID as late as possible
The choice was to either delay creation of the new UUID until IO got thawed or to delay it until the first IO request. Both are correct; the latter is more friendly to users of dual-primary setups that actually only write on one side.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--drivers/block/drbd/drbd_int.h9
-rw-r--r--drivers/block/drbd/drbd_main.c25
-rw-r--r--drivers/block/drbd/drbd_receiver.c11
3 files changed, 39 insertions, 6 deletions
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c194348a46ed..e9654c8d5b62 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -943,7 +943,8 @@ struct drbd_conf {
943 struct drbd_work resync_work, 943 struct drbd_work resync_work,
944 unplug_work, 944 unplug_work,
945 md_sync_work, 945 md_sync_work,
946 delay_probe_work; 946 delay_probe_work,
947 uuid_work;
947 struct timer_list resync_timer; 948 struct timer_list resync_timer;
948 struct timer_list md_sync_timer; 949 struct timer_list md_sync_timer;
949 struct timer_list delay_probe_timer; 950 struct timer_list delay_probe_timer;
@@ -1068,6 +1069,7 @@ struct drbd_conf {
1068 struct timeval dps_time; /* delay-probes-start-time */ 1069 struct timeval dps_time; /* delay-probes-start-time */
1069 unsigned int dp_volume_last; /* send_cnt of last delay probe */ 1070 unsigned int dp_volume_last; /* send_cnt of last delay probe */
1070 int c_sync_rate; /* current resync rate after delay_probe magic */ 1071 int c_sync_rate; /* current resync rate after delay_probe magic */
1072 atomic_t new_c_uuid;
1071}; 1073};
1072 1074
1073static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1075static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -2217,6 +2219,8 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
2217 return 0; 2219 return 0;
2218 if (test_bit(BITMAP_IO, &mdev->flags)) 2220 if (test_bit(BITMAP_IO, &mdev->flags))
2219 return 0; 2221 return 0;
2222 if (atomic_read(&mdev->new_c_uuid))
2223 return 0;
2220 return 1; 2224 return 1;
2221} 2225}
2222 2226
@@ -2237,6 +2241,9 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
2237 * to avoid races with the reconnect code, 2241 * to avoid races with the reconnect code,
2238 * we need to atomic_inc within the spinlock. */ 2242 * we need to atomic_inc within the spinlock. */
2239 2243
2244 if (atomic_read(&mdev->new_c_uuid) && atomic_add_unless(&mdev->new_c_uuid, -1, 1))
2245 drbd_queue_work_front(&mdev->data.work, &mdev->uuid_work);
2246
2240 spin_lock_irq(&mdev->req_lock); 2247 spin_lock_irq(&mdev->req_lock);
2241 while (!__inc_ap_bio_cond(mdev)) { 2248 while (!__inc_ap_bio_cond(mdev)) {
2242 prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); 2249 prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index d0fabace1452..c144509011b8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1217,17 +1217,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1217 mdev->p_uuid = NULL; 1217 mdev->p_uuid = NULL;
1218 if (get_ldev(mdev)) { 1218 if (get_ldev(mdev)) {
1219 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && 1219 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1220 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { 1220 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE)
1221 drbd_uuid_new_current(mdev); 1221 atomic_set(&mdev->new_c_uuid, 2);
1222 drbd_send_uuids(mdev);
1223 }
1224 put_ldev(mdev); 1222 put_ldev(mdev);
1225 } 1223 }
1226 } 1224 }
1227 1225
1228 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) { 1226 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1227 /* Diskless peer becomes primary or got connected do diskless, primary peer. */
1229 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) 1228 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
1230 drbd_uuid_new_current(mdev); 1229 atomic_set(&mdev->new_c_uuid, 2);
1231 1230
1232 /* D_DISKLESS Peer becomes secondary */ 1231 /* D_DISKLESS Peer becomes secondary */
1233 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) 1232 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1351,6 +1350,19 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1351 drbd_md_sync(mdev); 1350 drbd_md_sync(mdev);
1352} 1351}
1353 1352
1353static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1354{
1355 if (get_ldev(mdev)) {
1356 drbd_uuid_new_current(mdev);
1357 drbd_send_uuids(mdev);
1358 drbd_md_sync(mdev);
1359 put_ldev(mdev);
1360 }
1361 atomic_dec(&mdev->new_c_uuid);
1362 wake_up(&mdev->misc_wait);
1363
1364 return 1;
1365}
1354 1366
1355static int drbd_thread_setup(void *arg) 1367static int drbd_thread_setup(void *arg)
1356{ 1368{
@@ -2691,6 +2703,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2691 atomic_set(&mdev->net_cnt, 0); 2703 atomic_set(&mdev->net_cnt, 0);
2692 atomic_set(&mdev->packet_seq, 0); 2704 atomic_set(&mdev->packet_seq, 0);
2693 atomic_set(&mdev->pp_in_use, 0); 2705 atomic_set(&mdev->pp_in_use, 0);
2706 atomic_set(&mdev->new_c_uuid, 0);
2694 2707
2695 mutex_init(&mdev->md_io_mutex); 2708 mutex_init(&mdev->md_io_mutex);
2696 mutex_init(&mdev->data.mutex); 2709 mutex_init(&mdev->data.mutex);
@@ -2721,12 +2734,14 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2721 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 2734 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2722 INIT_LIST_HEAD(&mdev->delay_probes); 2735 INIT_LIST_HEAD(&mdev->delay_probes);
2723 INIT_LIST_HEAD(&mdev->delay_probe_work.list); 2736 INIT_LIST_HEAD(&mdev->delay_probe_work.list);
2737 INIT_LIST_HEAD(&mdev->uuid_work.list);
2724 2738
2725 mdev->resync_work.cb = w_resync_inactive; 2739 mdev->resync_work.cb = w_resync_inactive;
2726 mdev->unplug_work.cb = w_send_write_hint; 2740 mdev->unplug_work.cb = w_send_write_hint;
2727 mdev->md_sync_work.cb = w_md_sync; 2741 mdev->md_sync_work.cb = w_md_sync;
2728 mdev->bm_io_work.w.cb = w_bitmap_io; 2742 mdev->bm_io_work.w.cb = w_bitmap_io;
2729 mdev->delay_probe_work.cb = w_delay_probes; 2743 mdev->delay_probe_work.cb = w_delay_probes;
2744 mdev->uuid_work.cb = w_new_current_uuid;
2730 init_timer(&mdev->resync_timer); 2745 init_timer(&mdev->resync_timer);
2731 init_timer(&mdev->md_sync_timer); 2746 init_timer(&mdev->md_sync_timer);
2732 init_timer(&mdev->delay_probe_timer); 2747 init_timer(&mdev->delay_probe_timer);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index a04ec01ab3ce..461d9872d4d3 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1150,6 +1150,17 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1150 unsigned n_bios = 0; 1150 unsigned n_bios = 0;
1151 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; 1151 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1152 1152
1153 if (atomic_read(&mdev->new_c_uuid)) {
1154 if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
1155 drbd_uuid_new_current(mdev);
1156 drbd_md_sync(mdev);
1157
1158 atomic_dec(&mdev->new_c_uuid);
1159 wake_up(&mdev->misc_wait);
1160 }
1161 wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
1162 }
1163
1153 /* In most cases, we will only need one bio. But in case the lower 1164 /* In most cases, we will only need one bio. But in case the lower
1154 * level restrictions happen to be different at this offset on this 1165 * level restrictions happen to be different at this offset on this
1155 * side than those of the sending peer, we may need to submit the 1166 * side than those of the sending peer, we may need to submit the