author     Philipp Reisner <philipp.reisner@linbit.com>	2011-11-09 13:18:00 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>	2012-11-08 10:58:08 -0500
commit     12038a3a71ce6fabbcc2956cc8697fcbf729be57 (patch)
tree       4852bf1216081f37e1cf0708660dcde92a458ec3 /drivers/block/drbd
parent     1d2783d532207531ba8e3bfb016a4512dec97666 (diff)
drbd: Move list of epochs from mdev to tconn
This is necessary since the transfer_log on the sending side is also per tconn.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--	drivers/block/drbd/drbd_int.h       |  6
-rw-r--r--	drivers/block/drbd/drbd_main.c      | 25
-rw-r--r--	drivers/block/drbd/drbd_proc.c      |  2
-rw-r--r--	drivers/block/drbd/drbd_receiver.c  | 52
4 files changed, 42 insertions, 43 deletions
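In short, the current_epoch pointer, its epoch_lock and the epoch counter move from the per-volume struct drbd_conf ("mdev") into the per-connection struct drbd_tconn, so that all volumes of a connection share one epoch list, matching the per-connection transfer log on the sending side. A minimal sketch of the relocated fields, assembled from the hunks below (unrelated members elided):

	/* before this patch: epoch tracking lived in the per-volume device object */
	struct drbd_conf {
		/* ... */
		struct drbd_epoch *current_epoch;
		spinlock_t epoch_lock;
		unsigned int epochs;
		/* ... */
	};

	/* after this patch: epoch tracking is per connection, shared by all volumes */
	struct drbd_tconn {
		/* ... */
		struct drbd_epoch *current_epoch;	/* allocated in conn_create(), freed in conn_destroy() */
		spinlock_t epoch_lock;
		unsigned int epochs;
		enum write_ordering_e write_ordering;
		/* ... */
	};

Accordingly, allocation and cleanup move from conn_new_minor()/drbd_minor_destroy() to conn_create()/conn_destroy(), and the receiver path takes tconn->epoch_lock instead of mdev->epoch_lock.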
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 0db20cbb4234..94ec6bfb62a7 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -859,6 +859,9 @@ struct drbd_tconn { /* is a resource from the config file */
 	void *int_dig_in;
 	void *int_dig_vv;
 
+	struct drbd_epoch *current_epoch;
+	spinlock_t epoch_lock;
+	unsigned int epochs;
 	enum write_ordering_e write_ordering;
 
 	struct drbd_thread receiver;
@@ -962,9 +965,6 @@ struct drbd_conf {
 
 	int open_cnt;
 	u64 *p_uuid;
-	struct drbd_epoch *current_epoch;
-	spinlock_t epoch_lock;
-	unsigned int epochs;
 
 	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
 	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7b1cad895d16..8b99f4e28ccc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2082,7 +2082,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
 	spin_lock_init(&mdev->al_lock);
 	spin_lock_init(&mdev->peer_seq_lock);
-	spin_lock_init(&mdev->epoch_lock);
 
 	INIT_LIST_HEAD(&mdev->active_ee);
 	INIT_LIST_HEAD(&mdev->sync_ee);
@@ -2142,9 +2141,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
 				mdev->tconn->receiver.t_state);
 
-	/* no need to lock it, I'm the only thread alive */
-	if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
-		dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
 	mdev->al_writ_cnt =
 	mdev->bm_writ_cnt =
 	mdev->read_cnt =
@@ -2377,7 +2373,6 @@ void drbd_minor_destroy(struct kref *kref)
 	kfree(mdev->p_uuid);
 	/* mdev->p_uuid = NULL; */
 
-	kfree(mdev->current_epoch);
 	if (mdev->bitmap) /* should no longer be there. */
 		drbd_bm_cleanup(mdev);
 	__free_page(mdev->md_io_page);
@@ -2624,6 +2619,12 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
 	if (!tl_init(tconn))
 		goto fail;
 
+	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+	if (!tconn->current_epoch)
+		goto fail;
+	INIT_LIST_HEAD(&tconn->current_epoch->list);
+	tconn->epochs = 1;
+	spin_lock_init(&tconn->epoch_lock);
 	tconn->write_ordering = WO_bdev_flush;
 
 	tconn->cstate = C_STANDALONE;
@@ -2649,6 +2650,7 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
 	return tconn;
 
 fail:
+	kfree(tconn->current_epoch);
 	tl_cleanup(tconn);
 	free_cpumask_var(tconn->cpu_mask);
 	drbd_free_socket(&tconn->meta);
@@ -2663,6 +2665,10 @@ void conn_destroy(struct kref *kref)
 {
 	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
 
+	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+	kfree(tconn->current_epoch);
+
 	idr_destroy(&tconn->volumes);
 
 	free_cpumask_var(tconn->cpu_mask);
@@ -2744,13 +2750,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 	mdev->read_requests = RB_ROOT;
 	mdev->write_requests = RB_ROOT;
 
-	mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
-	if (!mdev->current_epoch)
-		goto out_no_epoch;
-
-	INIT_LIST_HEAD(&mdev->current_epoch->list);
-	mdev->epochs = 1;
-
 	if (!idr_pre_get(&minors, GFP_KERNEL))
 		goto out_no_minor_idr;
 	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
@@ -2786,8 +2785,6 @@ out_idr_remove_minor:
 	idr_remove(&minors, minor_got);
 	synchronize_rcu();
 out_no_minor_idr:
-	kfree(mdev->current_epoch);
-out_no_epoch:
 	drbd_bm_cleanup(mdev);
 out_no_bitmap:
 	__free_page(mdev->md_io_page);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 1321192b377d..64e0ddbf0668 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -271,7 +271,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		   atomic_read(&mdev->rs_pending_cnt),
 		   atomic_read(&mdev->unacked_cnt),
 		   atomic_read(&mdev->ap_bio_cnt),
-		   mdev->epochs,
+		   mdev->tconn->epochs,
 		   write_ordering_chars[mdev->tconn->write_ordering]
 		);
 	seq_printf(seq, " oos:%llu\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cc5e0b6a88e9..0e8e256579eb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1128,8 +1128,9 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 	int epoch_size;
 	struct drbd_epoch *next_epoch;
 	enum finish_epoch rv = FE_STILL_LIVE;
+	struct drbd_tconn *tconn = mdev->tconn;
 
-	spin_lock(&mdev->epoch_lock);
+	spin_lock(&tconn->epoch_lock);
 	do {
 		next_epoch = NULL;
 
@@ -1151,18 +1152,18 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 		    atomic_read(&epoch->active) == 0 &&
 		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
 			if (!(ev & EV_CLEANUP)) {
-				spin_unlock(&mdev->epoch_lock);
+				spin_unlock(&tconn->epoch_lock);
 				drbd_send_b_ack(epoch->mdev, epoch->barrier_nr, epoch_size);
-				spin_lock(&mdev->epoch_lock);
+				spin_lock(&tconn->epoch_lock);
 			}
 			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
 				dec_unacked(epoch->mdev);
 
-			if (mdev->current_epoch != epoch) {
+			if (tconn->current_epoch != epoch) {
 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
 				list_del(&epoch->list);
 				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
-				mdev->epochs--;
+				tconn->epochs--;
 				kfree(epoch);
 
 				if (rv == FE_STILL_LIVE)
@@ -1183,7 +1184,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 		epoch = next_epoch;
 	} while (1);
 
-	spin_unlock(&mdev->epoch_lock);
+	spin_unlock(&tconn->epoch_lock);
 
 	return rv;
 }
@@ -1348,9 +1349,9 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 
 	inc_unacked(mdev);
 
-	mdev->current_epoch->barrier_nr = p->barrier;
-	mdev->current_epoch->mdev = mdev;
-	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+	tconn->current_epoch->barrier_nr = p->barrier;
+	tconn->current_epoch->mdev = mdev;
+	rv = drbd_may_finish_epoch(mdev, tconn->current_epoch, EV_GOT_BARRIER_NR);
 
 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
 	 * the activity log, which means it would not be resynced in case the
@@ -1376,13 +1377,13 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 		drbd_flush(tconn);
 
-		if (atomic_read(&mdev->current_epoch->epoch_size)) {
+		if (atomic_read(&tconn->current_epoch->epoch_size)) {
 			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
 			if (epoch)
 				break;
 		}
 
-		epoch = mdev->current_epoch;
+		epoch = tconn->current_epoch;
 		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
 
 		D_ASSERT(atomic_read(&epoch->active) == 0);
@@ -1398,16 +1399,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 	atomic_set(&epoch->epoch_size, 0);
 	atomic_set(&epoch->active, 0);
 
-	spin_lock(&mdev->epoch_lock);
-	if (atomic_read(&mdev->current_epoch->epoch_size)) {
-		list_add(&epoch->list, &mdev->current_epoch->list);
-		mdev->current_epoch = epoch;
-		mdev->epochs++;
+	spin_lock(&tconn->epoch_lock);
+	if (atomic_read(&tconn->current_epoch->epoch_size)) {
+		list_add(&epoch->list, &tconn->current_epoch->list);
+		tconn->current_epoch = epoch;
+		tconn->epochs++;
 	} else {
 		/* The current_epoch got recycled while we allocated this one... */
 		kfree(epoch);
 	}
-	spin_unlock(&mdev->epoch_lock);
+	spin_unlock(&tconn->epoch_lock);
 
 	return 0;
 }
@@ -2103,7 +2104,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 
 		err = wait_for_and_update_peer_seq(mdev, peer_seq);
 		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
-		atomic_inc(&mdev->current_epoch->epoch_size);
+		atomic_inc(&tconn->current_epoch->epoch_size);
 		err2 = drbd_drain_block(mdev, pi->size);
 		if (!err)
 			err = err2;
@@ -2131,11 +2132,11 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
 
-	spin_lock(&mdev->epoch_lock);
-	peer_req->epoch = mdev->current_epoch;
+	spin_lock(&tconn->epoch_lock);
+	peer_req->epoch = tconn->current_epoch;
 	atomic_inc(&peer_req->epoch->epoch_size);
 	atomic_inc(&peer_req->epoch->active);
-	spin_unlock(&mdev->epoch_lock);
+	spin_unlock(&tconn->epoch_lock);
 
 	rcu_read_lock();
 	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
@@ -4359,6 +4360,11 @@ static void conn_disconnect(struct drbd_tconn *tconn)
 	}
 	rcu_read_unlock();
 
+	if (!list_empty(&tconn->current_epoch->list))
+		conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+	atomic_set(&tconn->current_epoch->epoch_size, 0);
+
 	conn_info(tconn, "Connection closed\n");
 
 	if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
@@ -4446,10 +4452,6 @@ static int drbd_disconnected(struct drbd_conf *mdev)
 	D_ASSERT(list_empty(&mdev->sync_ee));
 	D_ASSERT(list_empty(&mdev->done_ee));
 
-	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
-	atomic_set(&mdev->current_epoch->epoch_size, 0);
-	D_ASSERT(list_empty(&mdev->current_epoch->list));
-
 	return 0;
 }
 