Diffstat (limited to 'drivers')
 drivers/block/drbd/drbd_int.h      |   2 ++
 drivers/block/drbd/drbd_main.c     | 116 +++++++++++++++++++++++++-----------
 drivers/block/drbd/drbd_receiver.c |   3 ---
 drivers/block/drbd/drbd_req.c      |  14 ++++++++
 drivers/block/drbd/drbd_req.h      |   8 ++++
 5 files changed, 103 insertions(+), 40 deletions(-)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 11b7c6f84cd3..bef9138f1975 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1138,6 +1138,8 @@ extern void drbd_free_resources(struct drbd_conf *mdev);
 extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 		       unsigned int set_size);
 extern void tl_clear(struct drbd_conf *mdev);
+enum drbd_req_event;
+extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
 extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
 extern void drbd_free_sock(struct drbd_conf *mdev);
 extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
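
The two added header lines rely on a forward declaration: "enum drbd_req_event;" names the type without defining it, so the tl_restart() prototype can appear in drbd_int.h without including drbd_req.h, where the enum lives. ISO C forbids forward references to enum types, but GCC accepts the incomplete enum as an extension (warning only under -pedantic), which is what makes the pattern usable in the kernel. A minimal standalone sketch of the idea, with invented names:

    /* sketch.c - builds with "gcc -c sketch.c"; all names are illustrative */

    enum my_event;                              /* forward declaration: type is incomplete */

    extern void handle(enum my_event what);     /* a prototype may use the incomplete enum */

    enum my_event { EV_RESEND, EV_DONE };       /* the definition completes the type */

    void handle(enum my_event what)             /* the definition needs the complete type */
    {
            (void)what;                         /* placeholder body */
    }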
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a86e6f1ff7f4..a8a0341fce53 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -333,59 +333,94 @@ bail:
 	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 }
 
-
 /**
- * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
+ * _tl_restart() - Walks the transfer log, and applies an action to all requests
  * @mdev: DRBD device.
+ * @what: The action/event to perform with all request objects
  *
- * This is called after the connection to the peer was lost. The storage covered
- * by the requests on the transfer gets marked as our of sync. Called from the
- * receiver thread and the worker thread.
+ * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
+ * restart_frozen_disk_io.
  */
-void tl_clear(struct drbd_conf *mdev)
+static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 {
-	struct drbd_tl_epoch *b, *tmp;
+	struct drbd_tl_epoch *b, *tmp, **pn;
 	struct list_head *le, *tle;
-	struct drbd_request *r;
-	int new_initial_bnr = net_random();
-
-	spin_lock_irq(&mdev->req_lock);
+	struct drbd_request *req;
+	int rv, n_writes, n_reads;
 
 	b = mdev->oldest_tle;
+	pn = &mdev->oldest_tle;
 	while (b) {
+		n_writes = 0;
+		n_reads = 0;
 		list_for_each_safe(le, tle, &b->requests) {
-			r = list_entry(le, struct drbd_request, tl_requests);
-			/* It would be nice to complete outside of spinlock.
-			 * But this is easier for now. */
-			_req_mod(r, connection_lost_while_pending);
+			req = list_entry(le, struct drbd_request, tl_requests);
+			rv = _req_mod(req, what);
+
+			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
+			n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
 		}
 		tmp = b->next;
 
-		/* there could still be requests on that ring list,
-		 * in case local io is still pending */
-		list_del(&b->requests);
-
-		/* dec_ap_pending corresponding to queue_barrier.
-		 * the newest barrier may not have been queued yet,
-		 * in which case w.cb is still NULL. */
-		if (b->w.cb != NULL)
-			dec_ap_pending(mdev);
-
-		if (b == mdev->newest_tle) {
-			/* recycle, but reinit! */
-			D_ASSERT(tmp == NULL);
-			INIT_LIST_HEAD(&b->requests);
-			INIT_LIST_HEAD(&b->w.list);
-			b->w.cb = NULL;
-			b->br_number = new_initial_bnr;
-			b->n_writes = 0;
-
-			mdev->oldest_tle = b;
-			break;
+		if (n_writes + n_reads) {
+			if (what == resend) {
+				b->n_writes = n_writes;
+				if (b->w.cb == NULL) {
+					b->w.cb = w_send_barrier;
+					inc_ap_pending(mdev);
+					set_bit(CREATE_BARRIER, &mdev->flags);
+				}
+
+				drbd_queue_work(&mdev->data.work, &b->w);
+			}
+			pn = &b->next;
+		} else {
+			/* there could still be requests on that ring list,
+			 * in case local io is still pending */
+			list_del(&b->requests);
+
+			/* dec_ap_pending corresponding to queue_barrier.
+			 * the newest barrier may not have been queued yet,
+			 * in which case w.cb is still NULL. */
+			if (b->w.cb != NULL)
+				dec_ap_pending(mdev);
+
+			if (b == mdev->newest_tle) {
+				/* recycle, but reinit! */
+				D_ASSERT(tmp == NULL);
+				INIT_LIST_HEAD(&b->requests);
+				INIT_LIST_HEAD(&b->w.list);
+				b->w.cb = NULL;
+				b->br_number = net_random();
+				b->n_writes = 0;
+
+				*pn = b;
+				break;
+			}
+			*pn = tmp;
+			kfree(b);
 		}
-		kfree(b);
 		b = tmp;
 	}
+}
+
+
+/**
+ * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
+ * @mdev: DRBD device.
+ *
+ * This is called after the connection to the peer was lost. The storage covered
+ * by the requests on the transfer log gets marked as out of sync. Called from the
+ * receiver thread and the worker thread.
+ */
+void tl_clear(struct drbd_conf *mdev)
+{
+	struct list_head *le, *tle;
+	struct drbd_request *r;
+
+	spin_lock_irq(&mdev->req_lock);
+
+	_tl_restart(mdev, connection_lost_while_pending);
 
 	/* we expect this list to be empty. */
 	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
@@ -406,6 +441,13 @@ void tl_clear(struct drbd_conf *mdev)
 	spin_unlock_irq(&mdev->req_lock);
 }
 
+void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
+{
+	spin_lock_irq(&mdev->req_lock);
+	_tl_restart(mdev, what);
+	spin_unlock_irq(&mdev->req_lock);
+}
+
 /**
  * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
  * @mdev: DRBD device.
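
The new tl_restart()/_tl_restart() pair follows the usual kernel locking convention: the underscore-prefixed variant assumes the caller already holds req_lock (tl_clear() calls it that way, inside the lock it takes itself), while the public wrapper acquires and releases the lock around the call. A minimal userspace sketch of that pairing, using a pthread mutex as a stand-in for the spinlock and invented names throughout:

    #include <pthread.h>

    struct dev {
            pthread_mutex_t lock;       /* stand-in for mdev->req_lock */
            int state;
    };

    /* caller must hold dev->lock, like _tl_restart() */
    static void _restart(struct dev *d, int what)
    {
            d->state = what;            /* touch shared state under the lock */
    }

    /* public entry point, like tl_restart(): takes the lock itself */
    void restart(struct dev *d, int what)
    {
            pthread_mutex_lock(&d->lock);
            _restart(d, what);
            pthread_mutex_unlock(&d->lock);
    }

    /* code already inside the lock, like tl_clear(), calls _restart()
     * directly and so avoids self-deadlock on the non-recursive lock */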
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 88a5e1f4ec1d..8daa920c40a4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -776,9 +776,6 @@ static int drbd_connect(struct drbd_conf *mdev)
 
 	D_ASSERT(!mdev->data.socket);
 
-	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
-		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
-
 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
 		return -2;
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index d9df1a1c40b9..39c2cc3614e4 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -634,6 +634,20 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* else: done by handed_over_to_network */
 		break;
 
+	case resend:
+		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
+		   before the connection loss; only P_BARRIER_ACK was missing.
+		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+		   TODO: Either resync them, or ensure peer was not rebooted. */
+		if (!(req->rq_state & RQ_NET_OK)) {
+			if (req->w.cb) {
+				drbd_queue_work(&mdev->data.work, &req->w);
+				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
+			}
+			break;
+		}
+		/* else, fall through to barrier_acked */
+
 	case barrier_acked:
 		if (!(req->rq_state & RQ_WRITE))
 			break;
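
The new resend case relies on a guarded fall-through: a request that never saw RQ_NET_OK is requeued for the network and the case breaks, while a request the peer already acknowledged falls through and is finished exactly as a barrier_acked event would finish it. A standalone sketch of that control flow, with simplified stand-in flags and event names:

    #include <stdio.h>

    #define NET_OK 0x1                  /* stand-in for RQ_NET_OK */

    enum event { EV_RESEND, EV_BARRIER_ACKED };

    static void handle(unsigned state, enum event what)
    {
            switch (what) {
            case EV_RESEND:
                    if (!(state & NET_OK)) {
                            printf("requeue for resend\n");
                            break;      /* never acked: send it again */
                    }
                    /* acked before the loss: fall through to completion */
            case EV_BARRIER_ACKED:
                    printf("complete request\n");
                    break;
            }
    }

    int main(void)
    {
            handle(0, EV_RESEND);       /* prints "requeue for resend" */
            handle(NET_OK, EV_RESEND);  /* falls through, prints "complete request" */
            return 0;
    }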
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index db37c6e47fa9..1bcb85539735 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -104,6 +104,7 @@ enum drbd_req_event {
 	read_ahead_completed_with_error,
 	write_completed_with_error,
 	completed_ok,
+	resend,
 	nothing, /* for tracing only */
 };
 
@@ -206,6 +207,13 @@ enum drbd_req_state_bits {
 
 #define RQ_WRITE (1UL << __RQ_WRITE)
 
+/* For waking up the frozen transfer log, _req_mod() has to return whether the
+   request should be counted in the epoch object */
+#define MR_WRITE_SHIFT 0
+#define MR_WRITE (1 << MR_WRITE_SHIFT)
+#define MR_READ_SHIFT 1
+#define MR_READ (1 << MR_READ_SHIFT)
+
 /* epoch entries */
 static inline
 struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
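
The MR_* flags let a single return value report whether the modified request was a write, a read, or neither; _tl_restart() then shifts each flag back down to turn it into a 0-or-1 increment for its per-epoch counters. A small standalone check of that arithmetic (plain C, no DRBD types):

    #include <assert.h>

    #define MR_WRITE_SHIFT 0
    #define MR_WRITE       (1 << MR_WRITE_SHIFT)
    #define MR_READ_SHIFT  1
    #define MR_READ        (1 << MR_READ_SHIFT)

    int main(void)
    {
            int n_writes = 0, n_reads = 0;
            int rv = MR_WRITE;          /* as if _req_mod() just requeued a write */

            n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;  /* adds 1 */
            n_reads  += (rv & MR_READ)  >> MR_READ_SHIFT;   /* adds 0 */

            assert(n_writes == 1 && n_reads == 0);
            return 0;
    }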