about summary refs log tree commit diff stats
path: root/drivers/block/drbd
diff options
context:
space:
mode:
authorLars Ellenberg <lars.ellenberg@linbit.com>2011-11-14 09:42:37 -0500
committerPhilipp Reisner <philipp.reisner@linbit.com>2012-11-08 10:58:34 -0500
commitd5b27b01f17ef1f0badc45f9eea521be3457c9cb (patch)
treee0760531801c0b5b51ea8b3f05f9c0c5d85ff60e /drivers/block/drbd
parent8c0785a5c9a0f2472aff68dc32247be01728c416 (diff)
drbd: move the drbd_work_queue from drbd_socket to drbd_connection
cherry-picked and adapted from drbd 9 devel branch. In 8.4, we don't distinguish between "resource work" and "connection work" yet; we have one worker for both, as we still have only one connection. We only ever used the "data.work", so there is no need to keep the "meta.work" around. Move tconn->data.work to tconn->sender_work. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--drivers/block/drbd/drbd_actlog.c4
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_main.c16
-rw-r--r--drivers/block/drbd/drbd_receiver.c4
-rw-r--r--drivers/block/drbd/drbd_req.c12
-rw-r--r--drivers/block/drbd/drbd_state.c4
-rw-r--r--drivers/block/drbd/drbd_worker.c16
7 files changed, 28 insertions, 32 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 83d48d210b69..f500dc5cdf52 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -485,7 +485,7 @@ static int al_write_transaction(struct drbd_conf *mdev)
485 init_completion(&al_work.event); 485 init_completion(&al_work.event);
486 al_work.w.cb = w_al_write_transaction; 486 al_work.w.cb = w_al_write_transaction;
487 al_work.w.mdev = mdev; 487 al_work.w.mdev = mdev;
488 drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w); 488 drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
489 wait_for_completion(&al_work.event); 489 wait_for_completion(&al_work.event);
490 490
491 return al_work.err; 491 return al_work.err;
@@ -645,7 +645,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
645 udw->enr = ext->lce.lc_number; 645 udw->enr = ext->lce.lc_number;
646 udw->w.cb = w_update_odbm; 646 udw->w.cb = w_update_odbm;
647 udw->w.mdev = mdev; 647 udw->w.mdev = mdev;
648 drbd_queue_work_front(&mdev->tconn->data.work, &udw->w); 648 drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
649 } else { 649 } else {
650 dev_warn(DEV, "Could not kmalloc an udw\n"); 650 dev_warn(DEV, "Could not kmalloc an udw\n");
651 } 651 }
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e84c7b6a6bac..c0d0de54ae57 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -740,7 +740,6 @@ struct drbd_work_queue {
740}; 740};
741 741
742struct drbd_socket { 742struct drbd_socket {
743 struct drbd_work_queue work;
744 struct mutex mutex; 743 struct mutex mutex;
745 struct socket *socket; 744 struct socket *socket;
746 /* this way we get our 745 /* this way we get our
@@ -871,6 +870,7 @@ struct drbd_tconn { /* is a resource from the config file */
871 struct drbd_thread worker; 870 struct drbd_thread worker;
872 struct drbd_thread asender; 871 struct drbd_thread asender;
873 cpumask_var_t cpu_mask; 872 cpumask_var_t cpu_mask;
873 struct drbd_work_queue sender_work;
874}; 874};
875 875
876struct drbd_conf { 876struct drbd_conf {
@@ -2228,7 +2228,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
2228 wake_up(&mdev->misc_wait); 2228 wake_up(&mdev->misc_wait);
2229 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { 2229 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
2230 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) 2230 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2231 drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w); 2231 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
2232 } 2232 }
2233} 2233}
2234 2234
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f379d33b10a4..7e37149684e4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -379,7 +379,7 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
379 set_bit(CREATE_BARRIER, &tconn->flags); 379 set_bit(CREATE_BARRIER, &tconn->flags);
380 } 380 }
381 381
382 drbd_queue_work(&tconn->data.work, &b->w); 382 drbd_queue_work(&tconn->sender_work, &b->w);
383 } 383 }
384 pn = &b->next; 384 pn = &b->next;
385 } else { 385 } else {
@@ -2173,8 +2173,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2173 D_ASSERT(list_empty(&mdev->read_ee)); 2173 D_ASSERT(list_empty(&mdev->read_ee));
2174 D_ASSERT(list_empty(&mdev->net_ee)); 2174 D_ASSERT(list_empty(&mdev->net_ee));
2175 D_ASSERT(list_empty(&mdev->resync_reads)); 2175 D_ASSERT(list_empty(&mdev->resync_reads));
2176 D_ASSERT(list_empty(&mdev->tconn->data.work.q)); 2176 D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
2177 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
2178 D_ASSERT(list_empty(&mdev->resync_work.list)); 2177 D_ASSERT(list_empty(&mdev->resync_work.list));
2179 D_ASSERT(list_empty(&mdev->unplug_work.list)); 2178 D_ASSERT(list_empty(&mdev->unplug_work.list));
2180 D_ASSERT(list_empty(&mdev->go_diskless.list)); 2179 D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -2349,7 +2348,6 @@ void drbd_minor_destroy(struct kref *kref)
2349 2348
2350 /* paranoia asserts */ 2349 /* paranoia asserts */
2351 D_ASSERT(mdev->open_cnt == 0); 2350 D_ASSERT(mdev->open_cnt == 0);
2352 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2353 /* end paranoia asserts */ 2351 /* end paranoia asserts */
2354 2352
2355 /* cleanup stuff that may have been allocated during 2353 /* cleanup stuff that may have been allocated during
@@ -2700,10 +2698,8 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
2700 init_waitqueue_head(&tconn->ping_wait); 2698 init_waitqueue_head(&tconn->ping_wait);
2701 idr_init(&tconn->volumes); 2699 idr_init(&tconn->volumes);
2702 2700
2703 drbd_init_workqueue(&tconn->data.work); 2701 drbd_init_workqueue(&tconn->sender_work);
2704 mutex_init(&tconn->data.mutex); 2702 mutex_init(&tconn->data.mutex);
2705
2706 drbd_init_workqueue(&tconn->meta.work);
2707 mutex_init(&tconn->meta.mutex); 2703 mutex_init(&tconn->meta.mutex);
2708 2704
2709 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver"); 2705 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
@@ -3356,7 +3352,7 @@ void drbd_go_diskless(struct drbd_conf *mdev)
3356{ 3352{
3357 D_ASSERT(mdev->state.disk == D_FAILED); 3353 D_ASSERT(mdev->state.disk == D_FAILED);
3358 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) 3354 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3359 drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless); 3355 drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
3360} 3356}
3361 3357
3362/** 3358/**
@@ -3394,7 +3390,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3394 set_bit(BITMAP_IO, &mdev->flags); 3390 set_bit(BITMAP_IO, &mdev->flags);
3395 if (atomic_read(&mdev->ap_bio_cnt) == 0) { 3391 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3396 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) 3392 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
3397 drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w); 3393 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
3398 } 3394 }
3399 spin_unlock_irq(&mdev->tconn->req_lock); 3395 spin_unlock_irq(&mdev->tconn->req_lock);
3400} 3396}
@@ -3452,7 +3448,7 @@ static void md_sync_timer_fn(unsigned long data)
3452{ 3448{
3453 struct drbd_conf *mdev = (struct drbd_conf *) data; 3449 struct drbd_conf *mdev = (struct drbd_conf *) data;
3454 3450
3455 drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work); 3451 drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
3456} 3452}
3457 3453
3458static int w_md_sync(struct drbd_work *w, int unused) 3454static int w_md_sync(struct drbd_work *w, int unused)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 9aac1c4033c7..34fc33b5eb45 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4413,7 +4413,7 @@ void conn_flush_workqueue(struct drbd_tconn *tconn)
4413 barr.w.cb = w_prev_work_done; 4413 barr.w.cb = w_prev_work_done;
4414 barr.w.tconn = tconn; 4414 barr.w.tconn = tconn;
4415 init_completion(&barr.done); 4415 init_completion(&barr.done);
4416 drbd_queue_work(&tconn->data.work, &barr.w); 4416 drbd_queue_work(&tconn->sender_work, &barr.w);
4417 wait_for_completion(&barr.done); 4417 wait_for_completion(&barr.done);
4418} 4418}
4419 4419
@@ -5147,7 +5147,7 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5147 if (w) { 5147 if (w) {
5148 w->cb = w_ov_finished; 5148 w->cb = w_ov_finished;
5149 w->mdev = mdev; 5149 w->mdev = mdev;
5150 drbd_queue_work_front(&mdev->tconn->data.work, w); 5150 drbd_queue_work(&mdev->tconn->sender_work, w);
5151 } else { 5151 } else {
5152 dev_err(DEV, "kmalloc(w) failed."); 5152 dev_err(DEV, "kmalloc(w) failed.");
5153 ov_out_of_sync_print(mdev); 5153 ov_out_of_sync_print(mdev);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a131174b6677..e609557a9425 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -170,7 +170,7 @@ static void queue_barrier(struct drbd_conf *mdev)
170 * dec_ap_pending will be done in got_BarrierAck 170 * dec_ap_pending will be done in got_BarrierAck
171 * or (on connection loss) in tl_clear. */ 171 * or (on connection loss) in tl_clear. */
172 inc_ap_pending(mdev); 172 inc_ap_pending(mdev);
173 drbd_queue_work(&tconn->data.work, &b->w); 173 drbd_queue_work(&tconn->sender_work, &b->w);
174 set_bit(CREATE_BARRIER, &tconn->flags); 174 set_bit(CREATE_BARRIER, &tconn->flags);
175} 175}
176 176
@@ -483,7 +483,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
483 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); 483 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
484 req->rq_state |= RQ_NET_QUEUED; 484 req->rq_state |= RQ_NET_QUEUED;
485 req->w.cb = w_send_read_req; 485 req->w.cb = w_send_read_req;
486 drbd_queue_work(&mdev->tconn->data.work, &req->w); 486 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
487 break; 487 break;
488 488
489 case QUEUE_FOR_NET_WRITE: 489 case QUEUE_FOR_NET_WRITE:
@@ -527,7 +527,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
527 D_ASSERT(req->rq_state & RQ_NET_PENDING); 527 D_ASSERT(req->rq_state & RQ_NET_PENDING);
528 req->rq_state |= RQ_NET_QUEUED; 528 req->rq_state |= RQ_NET_QUEUED;
529 req->w.cb = w_send_dblock; 529 req->w.cb = w_send_dblock;
530 drbd_queue_work(&mdev->tconn->data.work, &req->w); 530 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
531 531
532 /* close the epoch, in case it outgrew the limit */ 532 /* close the epoch, in case it outgrew the limit */
533 rcu_read_lock(); 533 rcu_read_lock();
@@ -542,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
542 case QUEUE_FOR_SEND_OOS: 542 case QUEUE_FOR_SEND_OOS:
543 req->rq_state |= RQ_NET_QUEUED; 543 req->rq_state |= RQ_NET_QUEUED;
544 req->w.cb = w_send_out_of_sync; 544 req->w.cb = w_send_out_of_sync;
545 drbd_queue_work(&mdev->tconn->data.work, &req->w); 545 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
546 break; 546 break;
547 547
548 case READ_RETRY_REMOTE_CANCELED: 548 case READ_RETRY_REMOTE_CANCELED:
@@ -682,7 +682,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
682 682
683 get_ldev(mdev); 683 get_ldev(mdev);
684 req->w.cb = w_restart_disk_io; 684 req->w.cb = w_restart_disk_io;
685 drbd_queue_work(&mdev->tconn->data.work, &req->w); 685 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
686 break; 686 break;
687 687
688 case RESEND: 688 case RESEND:
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
692 During connection handshake, we ensure that the peer was not rebooted. */ 692 During connection handshake, we ensure that the peer was not rebooted. */
693 if (!(req->rq_state & RQ_NET_OK)) { 693 if (!(req->rq_state & RQ_NET_OK)) {
694 if (req->w.cb) { 694 if (req->w.cb) {
695 drbd_queue_work(&mdev->tconn->data.work, &req->w); 695 drbd_queue_work(&mdev->tconn->sender_work, &req->w);
696 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; 696 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
697 } 697 }
698 break; 698 break;
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index dd618b5346f2..84a5072d7370 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1090,7 +1090,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1090 ascw->w.cb = w_after_state_ch; 1090 ascw->w.cb = w_after_state_ch;
1091 ascw->w.mdev = mdev; 1091 ascw->w.mdev = mdev;
1092 ascw->done = done; 1092 ascw->done = done;
1093 drbd_queue_work(&mdev->tconn->data.work, &ascw->w); 1093 drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
1094 } else { 1094 } else {
1095 dev_err(DEV, "Could not kmalloc an ascw\n"); 1095 dev_err(DEV, "Could not kmalloc an ascw\n");
1096 } 1096 }
@@ -1764,7 +1764,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
1764 acscw->w.cb = w_after_conn_state_ch; 1764 acscw->w.cb = w_after_conn_state_ch;
1765 kref_get(&tconn->kref); 1765 kref_get(&tconn->kref);
1766 acscw->w.tconn = tconn; 1766 acscw->w.tconn = tconn;
1767 drbd_queue_work(&tconn->data.work, &acscw->w); 1767 drbd_queue_work(&tconn->sender_work, &acscw->w);
1768 } else { 1768 } else {
1769 conn_err(tconn, "Could not kmalloc an acscw\n"); 1769 conn_err(tconn, "Could not kmalloc an acscw\n");
1770 } 1770 }
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index fb2e6c8d45c9..39ece3a2f53a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -109,7 +109,7 @@ void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(lo
109 __drbd_chk_io_error(mdev, false); 109 __drbd_chk_io_error(mdev, false);
110 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags); 110 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
111 111
112 drbd_queue_work(&mdev->tconn->data.work, &peer_req->w); 112 drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
113 put_ldev(mdev); 113 put_ldev(mdev);
114} 114}
115 115
@@ -401,7 +401,7 @@ void resync_timer_fn(unsigned long data)
401 struct drbd_conf *mdev = (struct drbd_conf *) data; 401 struct drbd_conf *mdev = (struct drbd_conf *) data;
402 402
403 if (list_empty(&mdev->resync_work.list)) 403 if (list_empty(&mdev->resync_work.list))
404 drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work); 404 drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
405} 405}
406 406
407static void fifo_set(struct fifo_buffer *fb, int value) 407static void fifo_set(struct fifo_buffer *fb, int value)
@@ -783,7 +783,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
783 if (w) { 783 if (w) {
784 w->cb = w_resync_finished; 784 w->cb = w_resync_finished;
785 w->mdev = mdev; 785 w->mdev = mdev;
786 drbd_queue_work(&mdev->tconn->data.work, w); 786 drbd_queue_work(&mdev->tconn->sender_work, w);
787 return 1; 787 return 1;
788 } 788 }
789 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); 789 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -1484,7 +1484,7 @@ void start_resync_timer_fn(unsigned long data)
1484{ 1484{
1485 struct drbd_conf *mdev = (struct drbd_conf *) data; 1485 struct drbd_conf *mdev = (struct drbd_conf *) data;
1486 1486
1487 drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work); 1487 drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
1488} 1488}
1489 1489
1490int w_start_resync(struct drbd_work *w, int cancel) 1490int w_start_resync(struct drbd_work *w, int cancel)
@@ -1706,7 +1706,7 @@ int drbd_worker(struct drbd_thread *thi)
1706 /* as long as we use drbd_queue_work_front(), 1706 /* as long as we use drbd_queue_work_front(),
1707 * we may only dequeue single work items here, not batches. */ 1707 * we may only dequeue single work items here, not batches. */
1708 if (list_empty(&work_list)) 1708 if (list_empty(&work_list))
1709 dequeue_work_item(&tconn->data.work, &work_list); 1709 dequeue_work_item(&tconn->sender_work, &work_list);
1710 1710
1711 /* Still nothing to do? Poke TCP, just in case, 1711 /* Still nothing to do? Poke TCP, just in case,
1712 * then wait for new work (or signal). */ 1712 * then wait for new work (or signal). */
@@ -1721,8 +1721,8 @@ int drbd_worker(struct drbd_thread *thi)
1721 drbd_tcp_uncork(tconn->data.socket); 1721 drbd_tcp_uncork(tconn->data.socket);
1722 mutex_unlock(&tconn->data.mutex); 1722 mutex_unlock(&tconn->data.mutex);
1723 1723
1724 wait_event_interruptible(tconn->data.work.q_wait, 1724 wait_event_interruptible(tconn->sender_work.q_wait,
1725 dequeue_work_item(&tconn->data.work, &work_list)); 1725 dequeue_work_item(&tconn->sender_work, &work_list));
1726 1726
1727 mutex_lock(&tconn->data.mutex); 1727 mutex_lock(&tconn->data.mutex);
1728 if (tconn->data.socket && cork) 1728 if (tconn->data.socket && cork)
@@ -1758,7 +1758,7 @@ int drbd_worker(struct drbd_thread *thi)
1758 list_del_init(&w->list); 1758 list_del_init(&w->list);
1759 w->cb(w, 1); 1759 w->cb(w, 1);
1760 } 1760 }
1761 dequeue_work_batch(&tconn->data.work, &work_list); 1761 dequeue_work_batch(&tconn->sender_work, &work_list);
1762 } while (!list_empty(&work_list)); 1762 } while (!list_empty(&work_list));
1763 1763
1764 rcu_read_lock(); 1764 rcu_read_lock();