author	Andreas Gruenbacher <agruen@linbit.com>	2011-07-28 09:27:51 -0400
committer	Philipp Reisner <philipp.reisner@linbit.com>	2014-02-17 10:50:39 -0500
commit	84b8c06b6591e73250e6ab4834a02a86c8994b91 (patch)
tree	ef2be94ac7b57c1042560a9a39d8c84550803b85
parent	8682eae9b4b26d54b9eeac8e17c534197e6d8744 (diff)
drbd: Create a dedicated struct drbd_device_work
drbd_device_work is a work item that has a reference to a device, while
drbd_work is a more generic work item that does not carry a reference to
a device.

All callbacks get a pointer to a drbd_work instance; those callbacks that
expect a drbd_device_work use the container_of macro to get it.

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
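The change applies the standard kernel container_of idiom throughout. A minimal
sketch of the pattern (the two struct definitions mirror the patch below; the
example callback is illustrative only and not part of the driver):

	struct drbd_work {
		struct list_head list;
		int (*cb)(struct drbd_work *, int cancel);
	};

	struct drbd_device_work {
		struct drbd_work w;		/* embedded generic work item */
		struct drbd_device *device;	/* reference drbd_work no longer carries */
	};

	/* Illustrative callback: the worker always hands back a plain
	 * struct drbd_work pointer. A callback registered on a
	 * drbd_device_work maps it back to the enclosing structure
	 * with container_of() and so recovers the device. */
	static int example_device_cb(struct drbd_work *w, int cancel)
	{
		struct drbd_device_work *dw =
			container_of(w, struct drbd_device_work, w);
		struct drbd_device *device = dw->device;

		/* ... act on device, honoring cancel ... */
		return 0;
	}

This removes the anonymous union (device/connection) from struct drbd_work:
work items that need no device no longer pay for the pointer, and the type
itself now says which back-reference is valid.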
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	16
-rw-r--r--	drivers/block/drbd/drbd_int.h	29
-rw-r--r--	drivers/block/drbd/drbd_main.c	32
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	84
-rw-r--r--	drivers/block/drbd/drbd_req.c	41
-rw-r--r--	drivers/block/drbd/drbd_req.h	4
-rw-r--r--	drivers/block/drbd/drbd_state.c	21
-rw-r--r--	drivers/block/drbd/drbd_worker.c	134
8 files changed, 197 insertions(+), 164 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 081ff42602d0..90ae4ba8f9ee 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -95,11 +95,13 @@ struct __packed al_transaction_on_disk {
 
 struct update_odbm_work {
 	struct drbd_work w;
+	struct drbd_device *device;
 	unsigned int enr;
 };
 
 struct update_al_work {
 	struct drbd_work w;
+	struct drbd_device *device;
 	struct completion event;
 	int err;
 };
@@ -594,7 +596,7 @@ _al_write_transaction(struct drbd_device *device)
 static int w_al_write_transaction(struct drbd_work *w, int unused)
 {
 	struct update_al_work *aw = container_of(w, struct update_al_work, w);
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = aw->device;
 	int err;
 
 	err = _al_write_transaction(device);
@@ -613,8 +615,9 @@ static int al_write_transaction(struct drbd_device *device, bool delegate)
 		struct update_al_work al_work;
 		init_completion(&al_work.event);
 		al_work.w.cb = w_al_write_transaction;
-		al_work.w.device = device;
-		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &al_work.w);
+		al_work.device = device;
+		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+				      &al_work.w);
 		wait_for_completion(&al_work.event);
 		return al_work.err;
 	} else
@@ -684,7 +687,7 @@ int drbd_initialize_al(struct drbd_device *device, void *buffer)
 static int w_update_odbm(struct drbd_work *w, int unused)
 {
 	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = udw->device;
 	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
 
 	if (!get_ldev(device)) {
@@ -795,8 +798,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
 	if (udw) {
 		udw->enr = ext->lce.lc_number;
 		udw->w.cb = w_update_odbm;
-		udw->w.device = device;
-		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &udw->w);
+		udw->device = device;
+		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+				      &udw->w);
 	} else {
 		drbd_warn(device, "Could not kmalloc an udw\n");
 	}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ab3111e8ae70..3c52a4dc423d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -281,10 +281,11 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
 struct drbd_work {
 	struct list_head list;
 	int (*cb)(struct drbd_work *, int cancel);
-	union {
-		struct drbd_device *device;
-		struct drbd_connection *connection;
-	};
+};
+
+struct drbd_device_work {
+	struct drbd_work w;
+	struct drbd_device *device;
 };
 
 #include "drbd_interval.h"
@@ -293,6 +294,7 @@ extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
 
 struct drbd_request {
 	struct drbd_work w;
+	struct drbd_device *device;
 
 	/* if local IO is not allowed, will be NULL.
 	 * if local IO _is_ allowed, holds the locally submitted bio clone,
@@ -360,7 +362,7 @@ struct digest_info {
 };
 
 struct drbd_peer_request {
-	struct drbd_work w;
+	struct drbd_device_work dw;
 	struct drbd_epoch *epoch; /* for writes */
 	struct page *pages;
 	atomic_t pending_bios;
@@ -686,11 +688,11 @@ struct drbd_device {
 	struct gendisk *vdisk;
 
 	unsigned long last_reattach_jif;
-	struct drbd_work resync_work,
-			 unplug_work,
-			 go_diskless,
-			 md_sync_work,
-			 start_resync_work;
+	struct drbd_work resync_work;
+	struct drbd_work unplug_work;
+	struct drbd_work go_diskless;
+	struct drbd_work md_sync_work;
+	struct drbd_work start_resync_work;
 	struct timer_list resync_timer;
 	struct timer_list md_sync_timer;
 	struct timer_list start_resync_timer;
@@ -1865,7 +1867,8 @@ static inline void put_ldev(struct drbd_device *device)
 	if (device->state.disk == D_FAILED) {
 		/* all application IO references gone. */
 		if (!test_and_set_bit(GO_DISKLESS, &device->flags))
-			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->go_diskless);
+			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+					&device->go_diskless);
 	}
 	wake_up(&device->misc_wait);
 }
@@ -2092,7 +2095,9 @@ static inline void dec_ap_bio(struct drbd_device *device)
 
 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&first_peer_device(device)->
+					connection->sender_work,
+					&device->bm_io_work.w);
 	}
 
 	/* this currently does wake_up for every dec_ap_bio!
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6c86807f22ec..ada1b07c564e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -312,7 +312,7 @@ void tl_abort_disk_io(struct drbd_device *device)
 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
 		if (!(req->rq_state & RQ_LOCAL_PENDING))
 			continue;
-		if (req->w.device != device)
+		if (req->device != device)
 			continue;
 		_req_mod(req, ABORT_DISK_IO);
 	}
@@ -1917,13 +1917,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
 	device->bm_io_work.w.cb = w_bitmap_io;
 	device->start_resync_work.cb = w_start_resync;
 
-	device->resync_work.device = device;
-	device->unplug_work.device = device;
-	device->go_diskless.device = device;
-	device->md_sync_work.device = device;
-	device->bm_io_work.w.device = device;
-	device->start_resync_work.device = device;
-
 	init_timer(&device->resync_timer);
 	init_timer(&device->md_sync_timer);
 	init_timer(&device->start_resync_timer);
@@ -2222,12 +2215,12 @@ static void do_retry(struct work_struct *ws)
 	spin_unlock_irq(&retry->lock);
 
 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
-		struct drbd_device *device = req->w.device;
+		struct drbd_device *device = req->device;
 		struct bio *bio = req->master_bio;
 		unsigned long start_time = req->start_time;
 		bool expected;
 
 		expected =
 			expect(atomic_read(&req->completion_ref) == 0) &&
 			expect(req->rq_state & RQ_POSTPONED) &&
 			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
@@ -2273,7 +2266,7 @@ void drbd_restart_request(struct drbd_request *req)
 	/* Drop the extra reference that would otherwise
 	 * have been dropped by complete_master_bio.
 	 * do_retry() needs to grab a new one. */
-	dec_ap_bio(req->w.device);
+	dec_ap_bio(req->device);
 
 	queue_work(retry.wq, &retry.worker);
 }
@@ -3468,8 +3461,9 @@ int drbd_bmio_clear_n_write(struct drbd_device *device)
 
 static int w_bitmap_io(struct drbd_work *w, int unused)
 {
-	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
-	struct drbd_device *device = w->device;
+	struct drbd_device *device =
+		container_of(w, struct drbd_device, bm_io_work.w);
+	struct bm_io_work *work = &device->bm_io_work;
 	int rv = -EIO;
 
 	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
@@ -3509,7 +3503,8 @@ void drbd_ldev_destroy(struct drbd_device *device)
 
 static int w_go_diskless(struct drbd_work *w, int unused)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device =
+		container_of(w, struct drbd_device, go_diskless);
 
 	D_ASSERT(device, device->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
@@ -3583,7 +3578,8 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 	set_bit(BITMAP_IO, &device->flags);
 	if (atomic_read(&device->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+					&device->bm_io_work.w);
 	}
 	spin_unlock_irq(&device->resource->req_lock);
 }
@@ -3643,12 +3639,14 @@ static void md_sync_timer_fn(unsigned long data)
 
 	/* must not double-queue! */
 	if (list_empty(&device->md_sync_work.list))
-		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &device->md_sync_work);
+		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+				      &device->md_sync_work);
 }
 
 static int w_md_sync(struct drbd_work *w, int unused)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device =
+		container_of(w, struct drbd_device, md_sync_work);
 
 	drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
 #ifdef DEBUG
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e262b0bcbf67..87114361d804 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -209,7 +209,7 @@ static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
 	   stop to examine the list... */
 
 	list_for_each_safe(le, tle, &device->net_ee) {
-		peer_req = list_entry(le, struct drbd_peer_request, w.list);
+		peer_req = list_entry(le, struct drbd_peer_request, dw.w.list);
 		if (drbd_peer_req_has_active_page(peer_req))
 			break;
 		list_move(le, to_be_freed);
@@ -225,7 +225,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+	list_for_each_entry_safe(peer_req, t, &reclaimed, dw.w.list)
 		drbd_free_net_peer_req(device, peer_req);
 }
 
@@ -363,7 +363,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	peer_req->i.waiting = false;
 
 	peer_req->epoch = NULL;
-	peer_req->w.device = device;
+	peer_req->dw.device = device;
 	peer_req->pages = page;
 	atomic_set(&peer_req->pending_bios, 0);
 	peer_req->flags = 0;
@@ -402,7 +402,7 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, dw.w.list) {
 		__drbd_free_peer_req(device, peer_req, is_net);
 		count++;
 	}
@@ -424,18 +424,18 @@ static int drbd_finish_peer_reqs(struct drbd_device *device)
 	list_splice_init(&device->done_ee, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+	list_for_each_entry_safe(peer_req, t, &reclaimed, dw.w.list)
 		drbd_free_net_peer_req(device, peer_req);
 
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_superseded.
 	 * all ignore the last argument.
 	 */
-	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, dw.w.list) {
 		int err2;
 
 		/* list_del not necessary, next/prev members not touched */
-		err2 = peer_req->w.cb(&peer_req->w, !!err);
+		err2 = peer_req->dw.w.cb(&peer_req->dw.w, !!err);
 		if (!err)
 			err = err2;
 		drbd_free_peer_req(device, peer_req);
@@ -1664,9 +1664,10 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
  */
 static int e_end_resync_block(struct drbd_work *w, int unused)
 {
+	struct drbd_device_work *dw = device_work(w);
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = w->device;
+		container_of(dw, struct drbd_peer_request, dw);
+	struct drbd_device *device = dw->device;
 	sector_t sector = peer_req->i.sector;
 	int err;
 
@@ -1702,10 +1703,10 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
 	/* corresponding dec_unacked() in e_end_resync_block()
 	 * respective _drbd_clear_done_ee */
 
-	peer_req->w.cb = e_end_resync_block;
+	peer_req->dw.w.cb = e_end_resync_block;
 
 	spin_lock_irq(&device->resource->req_lock);
-	list_add(&peer_req->w.list, &device->sync_ee);
+	list_add(&peer_req->dw.w.list, &device->sync_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	atomic_add(data_size >> 9, &device->rs_sect_ev);
@@ -1715,7 +1716,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	drbd_free_peer_req(device, peer_req);
@@ -1835,9 +1836,10 @@ static void restart_conflicting_writes(struct drbd_device *device,
  */
 static int e_end_block(struct drbd_work *w, int cancel)
 {
+	struct drbd_device_work *dw = device_work(w);
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = w->device;
+		container_of(dw, struct drbd_peer_request, dw);
+	struct drbd_device *device = dw->device;
 	sector_t sector = peer_req->i.sector;
 	int err = 0, pcmd;
 
@@ -1874,11 +1876,11 @@ static int e_end_block(struct drbd_work *w, int cancel)
 	return err;
 }
 
-static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
+static int e_send_ack(struct drbd_device_work *dw, enum drbd_packet ack)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = dw->device;
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
+		container_of(dw, struct drbd_peer_request, dw);
 	int err;
 
 	err = drbd_send_ack(first_peer_device(device), ack, peer_req);
@@ -1889,14 +1891,15 @@ static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
 
 static int e_send_superseded(struct drbd_work *w, int unused)
 {
-	return e_send_ack(w, P_SUPERSEDED);
+	return e_send_ack(device_work(w), P_SUPERSEDED);
 }
 
 static int e_send_retry_write(struct drbd_work *w, int unused)
 {
-	struct drbd_connection *connection = first_peer_device(w->device)->connection;
+	struct drbd_device_work *dw = device_work(w);
+	struct drbd_connection *connection = first_peer_device(dw->device)->connection;
 
-	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
+	return e_send_ack(dw, connection->agreed_pro_version >= 100 ?
 			  P_RETRY_WRITE : P_SUPERSEDED);
 }
 
@@ -1943,7 +1946,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
 	bool rv = 0;
 
 	spin_lock_irq(&device->resource->req_lock);
-	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
+	list_for_each_entry(rs_req, &device->sync_ee, dw.w.list) {
 		if (overlaps(peer_req->i.sector, peer_req->i.size,
 			     rs_req->i.sector, rs_req->i.size)) {
 			rv = 1;
@@ -2114,9 +2117,9 @@ static int handle_write_conflicts(struct drbd_device *device,
 			  superseded ? "local" : "remote");
 
 		inc_unacked(device);
-		peer_req->w.cb = superseded ? e_send_superseded :
+		peer_req->dw.w.cb = superseded ? e_send_superseded :
 			e_send_retry_write;
-		list_add_tail(&peer_req->w.list, &device->done_ee);
+		list_add_tail(&peer_req->dw.w.list, &device->done_ee);
 		wake_asender(first_peer_device(device)->connection);
 
 		err = -ENOENT;
@@ -2212,7 +2215,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		return -EIO;
 	}
 
-	peer_req->w.cb = e_end_block;
+	peer_req->dw.w.cb = e_end_block;
 
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(device, dp_flags);
@@ -2252,7 +2255,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		update_peer_seq(peer_device, peer_seq);
 		spin_lock_irq(&device->resource->req_lock);
 	}
-	list_add(&peer_req->w.list, &device->active_ee);
+	list_add(&peer_req->dw.w.list, &device->active_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	if (device->state.conn == C_SYNC_TARGET)
@@ -2299,7 +2302,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	drbd_remove_epoch_entry_interval(device, peer_req);
 	spin_unlock_irq(&device->resource->req_lock);
 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
@@ -2454,13 +2457,13 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 
 	switch (pi->cmd) {
 	case P_DATA_REQUEST:
-		peer_req->w.cb = w_e_end_data_req;
+		peer_req->dw.w.cb = w_e_end_data_req;
 		fault_type = DRBD_FAULT_DT_RD;
 		/* application IO, don't drbd_rs_begin_io */
 		goto submit;
 
 	case P_RS_DATA_REQUEST:
-		peer_req->w.cb = w_e_end_rsdata_req;
+		peer_req->dw.w.cb = w_e_end_rsdata_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		/* used in the sector offset progress display */
 		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2484,13 +2487,13 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 
 	if (pi->cmd == P_CSUM_RS_REQUEST) {
 		D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
-		peer_req->w.cb = w_e_end_csum_rs_req;
+		peer_req->dw.w.cb = w_e_end_csum_rs_req;
 		/* used in the sector offset progress display */
 		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
 	} else if (pi->cmd == P_OV_REPLY) {
 		/* track progress, we may need to throttle */
 		atomic_add(size >> 9, &device->rs_sect_in);
-		peer_req->w.cb = w_e_end_ov_reply;
+		peer_req->dw.w.cb = w_e_end_ov_reply;
 		dec_rs_pending(device);
 		/* drbd_rs_begin_io done when we sent this request,
 		 * but accounting still needs to be done. */
@@ -2514,7 +2517,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 			drbd_info(device, "Online Verify start sector: %llu\n",
 				  (unsigned long long)sector);
 		}
-		peer_req->w.cb = w_e_end_ov_req;
+		peer_req->dw.w.cb = w_e_end_ov_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		break;
 
@@ -2555,7 +2558,7 @@ submit_for_resync:
 submit:
 	inc_unacked(device);
 	spin_lock_irq(&device->resource->req_lock);
-	list_add_tail(&peer_req->w.list, &device->read_ee);
+	list_add_tail(&peer_req->dw.w.list, &device->read_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
@@ -2564,7 +2567,7 @@ submit:
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	spin_unlock_irq(&device->resource->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
@@ -4495,7 +4498,6 @@ void conn_flush_workqueue(struct drbd_connection *connection)
 	struct drbd_wq_barrier barr;
 
 	barr.w.cb = w_complete;
-	barr.w.connection = connection;
 	init_completion(&barr.done);
 	drbd_queue_work(&connection->sender_work, &barr.w);
 	wait_for_completion(&barr.done);
@@ -5218,7 +5220,7 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
 	struct drbd_peer_device *peer_device;
 	struct drbd_device *device;
 	struct p_block_ack *p = pi->data;
-	struct drbd_work *w;
+	struct drbd_device_work *dw;
 	sector_t sector;
 	int size;
 
@@ -5250,13 +5252,13 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
 		drbd_advance_rs_marks(device, device->ov_left);
 
 		if (device->ov_left == 0) {
-			w = kmalloc(sizeof(*w), GFP_NOIO);
-			if (w) {
-				w->cb = w_ov_finished;
-				w->device = device;
-				drbd_queue_work(&peer_device->connection->sender_work, w);
+			dw = kmalloc(sizeof(*dw), GFP_NOIO);
+			if (dw) {
+				dw->w.cb = w_ov_finished;
+				dw->device = device;
+				drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
 			} else {
-				drbd_err(device, "kmalloc(w) failed.");
+				drbd_err(device, "kmalloc(dw) failed.");
 				ov_out_of_sync_print(device);
 				drbd_resync_finished(device);
 			}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f74c0a244e9a..3779c8d2875b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -72,7 +72,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 
 	drbd_req_make_private_bio(req, bio_src);
 	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
-	req->w.device = device;
+	req->device = device;
 	req->master_bio = bio_src;
 	req->epoch = 0;
 
@@ -95,7 +95,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
 void drbd_req_destroy(struct kref *kref)
 {
 	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	const unsigned s = req->rq_state;
 
 	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
@@ -191,7 +191,7 @@ void complete_master_bio(struct drbd_device *device,
 static void drbd_remove_request_interval(struct rb_root *root,
 					 struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct drbd_interval *i = &req->i;
 
 	drbd_remove_interval(root, i);
@@ -211,7 +211,7 @@ static
 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned s = req->rq_state;
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	int rw;
 	int error, ok;
 
@@ -306,7 +306,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 
 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
 
 	if (!atomic_sub_and_test(put, &req->completion_ref))
@@ -329,7 +329,7 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 		int clear, int set)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	unsigned s = req->rq_state;
 	int c_put = 0;
 	int k_put = 0;
@@ -454,7 +454,7 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request
 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		struct bio_and_error *m)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct net_conf *nc;
 	int p, rv = 0;
 
@@ -542,7 +542,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case QUEUE_FOR_NET_WRITE:
@@ -577,7 +578,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
@@ -592,7 +594,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case QUEUE_FOR_SEND_OOS:
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_out_of_sync;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case READ_RETRY_REMOTE_CANCELED:
@@ -704,7 +707,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(device); /* always succeeds in this call path */
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&req->w);
 		break;
 
 	case RESEND:
@@ -720,12 +724,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
 	   During connection handshake, we ensure that the peer was not rebooted. */
 		if (!(req->rq_state & RQ_NET_OK)) {
-			/* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
 			 * in that case we must not set RQ_NET_PENDING. */
 
 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
 			if (req->w.cb) {
-				drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
+				drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+						&req->w);
 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 			} /* else: FIXME can this happen? */
 			break;
@@ -835,7 +840,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 static void complete_conflicting_writes(struct drbd_request *req)
 {
 	DEFINE_WAIT(wait);
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct drbd_interval *i;
 	sector_t sector = req->i.sector;
 	int size = req->i.size;
@@ -915,7 +920,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
  */
 static bool do_remote_read(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	enum drbd_read_balancing rbm;
 
 	if (req->private_bio) {
@@ -960,7 +965,7 @@ static bool do_remote_read(struct drbd_request *req)
  * which does NOT include those that we are L_AHEAD for. */
 static int drbd_process_write_request(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	int remote, send_oos;
 
 	remote = drbd_should_do_remote(device->state);
@@ -997,7 +1002,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 static void
 drbd_submit_req_private_bio(struct drbd_request *req)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct bio *bio = req->private_bio;
 	const int rw = bio_rw(bio);
 
@@ -1390,7 +1395,7 @@ void request_timer_fn(unsigned long data)
 		drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
 		_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
 	}
-	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device &&
+	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device &&
 	    time_after(now, req->start_time + dt) &&
 	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
 		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 5ce6dc505751..c684c963538e 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -294,7 +294,7 @@ extern void drbd_restart_request(struct drbd_request *req);
  * outside the spinlock, e.g. when walking some list on cleanup. */
 static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 {
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct bio_and_error m;
 	int rv;
 
@@ -314,7 +314,7 @@ static inline int req_mod(struct drbd_request *req,
 			enum drbd_req_event what)
 {
 	unsigned long flags;
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct bio_and_error m;
 	int rv;
 
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 87ae01199a19..2e8e54b13332 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -32,6 +32,7 @@
 
 struct after_state_chg_work {
 	struct drbd_work w;
+	struct drbd_device *device;
 	union drbd_state os;
 	union drbd_state ns;
 	enum chg_state_flags flags;
@@ -1145,9 +1146,10 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 		ascw->ns = ns;
 		ascw->flags = flags;
 		ascw->w.cb = w_after_state_ch;
-		ascw->w.device = device;
+		ascw->device = device;
 		ascw->done = done;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &ascw->w);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&ascw->w);
 	} else {
 		drbd_err(device, "Could not kmalloc an ascw\n");
 	}
@@ -1159,7 +1161,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
 {
 	struct after_state_chg_work *ascw =
 		container_of(w, struct after_state_chg_work, w);
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = ascw->device;
 
 	after_state_ch(device, ascw->os, ascw->ns, ascw->flags);
 	if (ascw->flags & CS_WAIT_COMPLETE)
@@ -1528,18 +1530,19 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 }
 
 struct after_conn_state_chg_work {
-	struct drbd_work w;
+	struct drbd_device_work dw;
 	enum drbd_conns oc;
 	union drbd_state ns_min;
 	union drbd_state ns_max; /* new, max state, over all devices */
 	enum chg_state_flags flags;
+	struct drbd_connection *connection;
 };
 
 static int w_after_conn_state_ch(struct drbd_work *w, int unused)
 {
 	struct after_conn_state_chg_work *acscw =
-		container_of(w, struct after_conn_state_chg_work, w);
-	struct drbd_connection *connection = w->connection;
+		container_of(w, struct after_conn_state_chg_work, dw.w);
+	struct drbd_connection *connection = acscw->connection;
 	enum drbd_conns oc = acscw->oc;
 	union drbd_state ns_max = acscw->ns_max;
 	struct drbd_peer_device *peer_device;
@@ -1840,10 +1843,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
 		acscw->ns_min = ns_min;
 		acscw->ns_max = ns_max;
 		acscw->flags = flags;
-		acscw->w.cb = w_after_conn_state_ch;
+		acscw->dw.w.cb = w_after_conn_state_ch;
 		kref_get(&connection->kref);
-		acscw->w.connection = connection;
-		drbd_queue_work(&connection->sender_work, &acscw->w);
+		acscw->connection = connection;
+		drbd_queue_work(&connection->sender_work, &acscw->dw.w);
 	} else {
 		drbd_err(connection, "Could not kmalloc an acscw\n");
 	}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index cb9ba141b610..c47fcc5af7f2 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -21,7 +21,7 @@
    along with drbd; see the file COPYING.  If not, write to
    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 
- */
+*/
 
 #include <linux/module.h>
 #include <linux/drbd.h>
@@ -39,7 +39,7 @@
 #include "drbd_protocol.h"
 #include "drbd_req.h"
 
-static int w_make_ov_request(struct drbd_work *w, int cancel);
+static int w_make_ov_request(struct drbd_work *, int);
 
 
 /* endio handlers:
@@ -100,18 +100,19 @@ void drbd_md_io_complete(struct bio *bio, int error)
 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->w.device;
+	struct drbd_device *device = peer_req->dw.device;
 
 	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->read_cnt += peer_req->i.size >> 9;
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	if (list_empty(&device->read_ee))
 		wake_up(&device->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 
-	drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w);
+	drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+			&peer_req->dw.w);
 	put_ldev(device);
 }
 
@@ -120,7 +121,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->w.device;
+	struct drbd_device *device = peer_req->dw.device;
 	struct drbd_interval i;
 	int do_wake;
 	u64 block_id;
@@ -136,13 +137,13 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 
 	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->writ_cnt += peer_req->i.size >> 9;
-	list_move_tail(&peer_req->w.list, &device->done_ee);
+	list_move_tail(&peer_req->dw.w.list, &device->done_ee);
 
 	/*
 	 * Do not remove from the write_requests tree here: we did not send the
 	 * Ack yet and did not wake possibly waiting conflicting requests.
 	 * Removed from the tree from "drbd_process_done_ee" within the
-	 * appropriate w.cb (e_end_block/e_end_resync_block) or from
+	 * appropriate dw.cb (e_end_block/e_end_resync_block) or from
 	 * _drbd_clear_done_ee.
 	 */
 
@@ -171,7 +172,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 void drbd_peer_request_endio(struct bio *bio, int error)
 {
 	struct drbd_peer_request *peer_req = bio->bi_private;
-	struct drbd_device *device = peer_req->w.device;
+	struct drbd_device *device = peer_req->dw.device;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 
@@ -208,7 +209,7 @@ void drbd_request_endio(struct bio *bio, int error)
 {
 	unsigned long flags;
 	struct drbd_request *req = bio->bi_private;
-	struct drbd_device *device = req->w.device;
+	struct drbd_device *device = req->device;
 	struct bio_and_error m;
 	enum drbd_req_event what;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -332,8 +333,9 @@ void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
 /* MAYBE merge common code with w_e_end_ov_req */
 static int w_e_send_csum(struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = w->device;
+	struct drbd_device_work *dw = device_work(w);
+	struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
+	struct drbd_device *device = dw->device;
 	int digest_size;
 	void *digest;
 	int err = 0;
@@ -396,9 +398,9 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
 	if (!peer_req)
 		goto defer;
 
-	peer_req->w.cb = w_e_send_csum;
+	peer_req->dw.w.cb = w_e_send_csum;
 	spin_lock_irq(&device->resource->req_lock);
-	list_add(&peer_req->w.list, &device->read_ee);
+	list_add(&peer_req->dw.w.list, &device->read_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	atomic_add(size >> 9, &device->rs_sect_ev);
@@ -410,7 +412,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	drbd_free_peer_req(device, peer_req);
@@ -421,7 +423,9 @@ defer:
 
 int w_resync_timer(struct drbd_work *w, int cancel)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device =
+		container_of(w, struct drbd_device, resync_work);
+
 	switch (device->state.conn) {
 	case C_VERIFY_S:
 		w_make_ov_request(w, cancel);
@@ -439,7 +443,8 @@ void resync_timer_fn(unsigned long data)
 	struct drbd_device *device = (struct drbd_device *) data;
 
 	if (list_empty(&device->resync_work.list))
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->resync_work);
+		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+				&device->resync_work);
 }
 
 static void fifo_set(struct fifo_buffer *fb, int value)
@@ -563,7 +568,8 @@ static int drbd_rs_number_requests(struct drbd_device *device)
 
 int w_make_resync_request(struct drbd_work *w, int cancel)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device_work *dw = device_work(w);
+	struct drbd_device *device = dw->device;
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
@@ -727,7 +733,7 @@ next_sector:
 
 static int w_make_ov_request(struct drbd_work *w, int cancel)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = device_work(w)->device;
 	int number, i, size;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
@@ -781,8 +787,10 @@ static int w_make_ov_request(struct drbd_work *w, int cancel)
781 787
782int w_ov_finished(struct drbd_work *w, int cancel) 788int w_ov_finished(struct drbd_work *w, int cancel)
783{ 789{
784 struct drbd_device *device = w->device; 790 struct drbd_device_work *dw =
785 kfree(w); 791 container_of(w, struct drbd_device_work, w);
792 struct drbd_device *device = dw->device;
793 kfree(dw);
786 ov_out_of_sync_print(device); 794 ov_out_of_sync_print(device);
787 drbd_resync_finished(device); 795 drbd_resync_finished(device);
788 796
@@ -791,8 +799,10 @@ int w_ov_finished(struct drbd_work *w, int cancel)
791 799
792static int w_resync_finished(struct drbd_work *w, int cancel) 800static int w_resync_finished(struct drbd_work *w, int cancel)
793{ 801{
794 struct drbd_device *device = w->device; 802 struct drbd_device_work *dw =
795 kfree(w); 803 container_of(w, struct drbd_device_work, w);
804 struct drbd_device *device = dw->device;
805 kfree(dw);
796 806
797 drbd_resync_finished(device); 807 drbd_resync_finished(device);
798 808
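w_ov_finished() and w_resync_finished() run on heap-allocated work items, so their kfree() moves from the member pointer to the container pointer. The two addresses only coincide while w happens to be the first member of drbd_device_work; freeing the container is correct regardless of layout:

    struct drbd_device_work *dw =
            container_of(w, struct drbd_device_work, w);
    struct drbd_device *device = dw->device;  /* read before the free */
    kfree(dw);  /* releases the allocation made in drbd_resync_finished() */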
@@ -814,7 +824,7 @@ int drbd_resync_finished(struct drbd_device *device)
814 unsigned long db, dt, dbdt; 824 unsigned long db, dt, dbdt;
815 unsigned long n_oos; 825 unsigned long n_oos;
816 union drbd_state os, ns; 826 union drbd_state os, ns;
817 struct drbd_work *w; 827 struct drbd_device_work *dw;
818 char *khelper_cmd = NULL; 828 char *khelper_cmd = NULL;
819 int verify_done = 0; 829 int verify_done = 0;
820 830
@@ -828,20 +838,21 @@ int drbd_resync_finished(struct drbd_device *device)
828 * is not finished by now). Retry in 100ms. */ 838 * is not finished by now). Retry in 100ms. */
829 839
830 schedule_timeout_interruptible(HZ / 10); 840 schedule_timeout_interruptible(HZ / 10);
831 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); 841 dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
832 if (w) { 842 if (dw) {
833 w->cb = w_resync_finished; 843 dw->w.cb = w_resync_finished;
834 w->device = device; 844 dw->device = device;
835 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w); 845 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
846 &dw->w);
836 return 1; 847 return 1;
837 } 848 }
838 drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); 849 drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
839 } 850 }
840 851
841 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; 852 dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
842 if (dt <= 0) 853 if (dt <= 0)
843 dt = 1; 854 dt = 1;
844 855
845 db = device->rs_total; 856 db = device->rs_total;
846 /* adjust for verify start and stop sectors, respective reached position */ 857 /* adjust for verify start and stop sectors, respective reached position */
847 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) 858 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
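drbd_resync_finished() is the matching producer for w_resync_finished(): if drbd_rs_del_all() cannot complete yet, it defers itself by allocating a drbd_device_work and requeueing. Reduced to the ownership pattern (retry timing and failure fallback elided):

    struct drbd_device_work *dw = kmalloc(sizeof(*dw), GFP_ATOMIC);
    if (dw) {
            dw->w.cb = w_resync_finished;   /* the callback kfree()s dw */
            dw->device = device;            /* explicit reference, no longer in drbd_work */
            drbd_queue_work(&first_peer_device(device)->connection->sender_work,
                            &dw->w);        /* the queue still sees a plain drbd_work */
    }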
@@ -972,7 +983,7 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
972 atomic_add(i, &device->pp_in_use_by_net); 983 atomic_add(i, &device->pp_in_use_by_net);
973 atomic_sub(i, &device->pp_in_use); 984 atomic_sub(i, &device->pp_in_use);
974 spin_lock_irq(&device->resource->req_lock); 985 spin_lock_irq(&device->resource->req_lock);
975 list_add_tail(&peer_req->w.list, &device->net_ee); 986 list_add_tail(&peer_req->dw.w.list, &device->net_ee);
976 spin_unlock_irq(&device->resource->req_lock); 987 spin_unlock_irq(&device->resource->req_lock);
977 wake_up(&drbd_pp_wait); 988 wake_up(&drbd_pp_wait);
978 } else 989 } else
@@ -987,8 +998,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
987 */ 998 */
988int w_e_end_data_req(struct drbd_work *w, int cancel) 999int w_e_end_data_req(struct drbd_work *w, int cancel)
989{ 1000{
990 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1001 struct drbd_device_work *dw = device_work(w);
991 struct drbd_device *device = w->device; 1002 struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
1003 struct drbd_device *device = dw->device;
992 int err; 1004 int err;
993 1005
994 if (unlikely(cancel)) { 1006 if (unlikely(cancel)) {
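All w_e_end_*() callbacks in this file now recover their context in two container_of() steps: from the drbd_work to the drbd_device_work via device_work(), then from the drbd_device_work to the enclosing drbd_peer_request. Spelled out:

    /* w points at peer_req->dw.w; unwind one nesting level at a time. */
    struct drbd_device_work *dw = device_work(w);          /* == &peer_req->dw */
    struct drbd_peer_request *peer_req =
            container_of(dw, struct drbd_peer_request, dw);
    struct drbd_device *device = dw->device;   /* stored when the request was queued */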
@@ -1018,14 +1030,14 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
1018 1030
1019/** 1031/**
1020 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST 1032 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
1021 * @device: DRBD device.
1022 * @w: work object. 1033 * @w: work object.
1023 * @cancel: The connection will be closed anyways 1034 * @cancel: The connection will be closed anyways
1024 */ 1035 */
1025int w_e_end_rsdata_req(struct drbd_work *w, int cancel) 1036int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1026{ 1037{
1027 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1038 struct drbd_device_work *dw = device_work(w);
1028 struct drbd_device *device = w->device; 1039 struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
1040 struct drbd_device *device = dw->device;
1029 int err; 1041 int err;
1030 1042
1031 if (unlikely(cancel)) { 1043 if (unlikely(cancel)) {
@@ -1073,8 +1085,9 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1073 1085
1074int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) 1086int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1075{ 1087{
1076 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1088 struct drbd_device_work *dw = device_work(w);
1077 struct drbd_device *device = w->device; 1089 struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
1090 struct drbd_device *device = dw->device;
1078 struct digest_info *di; 1091 struct digest_info *di;
1079 int digest_size; 1092 int digest_size;
1080 void *digest = NULL; 1093 void *digest = NULL;
@@ -1136,8 +1149,9 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1136 1149
1137int w_e_end_ov_req(struct drbd_work *w, int cancel) 1150int w_e_end_ov_req(struct drbd_work *w, int cancel)
1138{ 1151{
1139 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1152 struct drbd_device_work *dw = device_work(w);
1140 struct drbd_device *device = w->device; 1153 struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
1154 struct drbd_device *device = dw->device;
1141 sector_t sector = peer_req->i.sector; 1155 sector_t sector = peer_req->i.sector;
1142 unsigned int size = peer_req->i.size; 1156 unsigned int size = peer_req->i.size;
1143 int digest_size; 1157 int digest_size;
@@ -1192,8 +1206,9 @@ void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int
1192 1206
1193int w_e_end_ov_reply(struct drbd_work *w, int cancel) 1207int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1194{ 1208{
1195 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1209 struct drbd_device_work *dw = device_work(w);
1196 struct drbd_device *device = w->device; 1210 struct drbd_peer_request *peer_req = container_of(dw, struct drbd_peer_request, dw);
1211 struct drbd_device *device = dw->device;
1197 struct digest_info *di; 1212 struct digest_info *di;
1198 void *digest; 1213 void *digest;
1199 sector_t sector = peer_req->i.sector; 1214 sector_t sector = peer_req->i.sector;
@@ -1285,7 +1300,8 @@ static int drbd_send_barrier(struct drbd_connection *connection)
1285 1300
1286int w_send_write_hint(struct drbd_work *w, int cancel) 1301int w_send_write_hint(struct drbd_work *w, int cancel)
1287{ 1302{
1288 struct drbd_device *device = w->device; 1303 struct drbd_device *device =
1304 container_of(w, struct drbd_device, unplug_work);
1289 struct drbd_socket *sock; 1305 struct drbd_socket *sock;
1290 1306
1291 if (cancel) 1307 if (cancel)
@@ -1320,7 +1336,7 @@ static void maybe_send_barrier(struct drbd_connection *connection, unsigned int
1320int w_send_out_of_sync(struct drbd_work *w, int cancel) 1336int w_send_out_of_sync(struct drbd_work *w, int cancel)
1321{ 1337{
1322 struct drbd_request *req = container_of(w, struct drbd_request, w); 1338 struct drbd_request *req = container_of(w, struct drbd_request, w);
1323 struct drbd_device *device = w->device; 1339 struct drbd_device *device = req->device;
1324 struct drbd_connection *connection = first_peer_device(device)->connection; 1340 struct drbd_connection *connection = first_peer_device(device)->connection;
1325 int err; 1341 int err;
1326 1342
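w_send_out_of_sync() and the w_send_dblock(), w_send_read_req() and w_restart_disk_io() hunks that follow switch from w->device to req->device, so struct drbd_request must carry its own device pointer. drbd_req.h is part of this patch but not of this excerpt; the field placement sketched here is an assumption:

    struct drbd_request {
            struct drbd_device *device;     /* assumed: per-request device reference */
            struct drbd_work w;             /* generic work item, no device inside */
            /* ... master_bio, i (interval), rq_state, ... */
    };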
@@ -1343,14 +1359,13 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
1343 1359
1344/** 1360/**
1345 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request 1361 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1346 * @device: DRBD device.
1347 * @w: work object. 1362 * @w: work object.
1348 * @cancel: The connection will be closed anyways 1363 * @cancel: The connection will be closed anyways
1349 */ 1364 */
1350int w_send_dblock(struct drbd_work *w, int cancel) 1365int w_send_dblock(struct drbd_work *w, int cancel)
1351{ 1366{
1352 struct drbd_request *req = container_of(w, struct drbd_request, w); 1367 struct drbd_request *req = container_of(w, struct drbd_request, w);
1353 struct drbd_device *device = w->device; 1368 struct drbd_device *device = req->device;
1354 struct drbd_connection *connection = first_peer_device(device)->connection; 1369 struct drbd_connection *connection = first_peer_device(device)->connection;
1355 int err; 1370 int err;
1356 1371
@@ -1371,14 +1386,13 @@ int w_send_dblock(struct drbd_work *w, int cancel)
1371 1386
1372/** 1387/**
1373 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet 1388 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1374 * @device: DRBD device.
1375 * @w: work object. 1389 * @w: work object.
1376 * @cancel: The connection will be closed anyways 1390 * @cancel: The connection will be closed anyways
1377 */ 1391 */
1378int w_send_read_req(struct drbd_work *w, int cancel) 1392int w_send_read_req(struct drbd_work *w, int cancel)
1379{ 1393{
1380 struct drbd_request *req = container_of(w, struct drbd_request, w); 1394 struct drbd_request *req = container_of(w, struct drbd_request, w);
1381 struct drbd_device *device = w->device; 1395 struct drbd_device *device = req->device;
1382 struct drbd_connection *connection = first_peer_device(device)->connection; 1396 struct drbd_connection *connection = first_peer_device(device)->connection;
1383 int err; 1397 int err;
1384 1398
@@ -1402,7 +1416,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
1402int w_restart_disk_io(struct drbd_work *w, int cancel) 1416int w_restart_disk_io(struct drbd_work *w, int cancel)
1403{ 1417{
1404 struct drbd_request *req = container_of(w, struct drbd_request, w); 1418 struct drbd_request *req = container_of(w, struct drbd_request, w);
1405 struct drbd_device *device = w->device; 1419 struct drbd_device *device = req->device;
1406 1420
1407 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) 1421 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1408 drbd_al_begin_io(device, &req->i, false); 1422 drbd_al_begin_io(device, &req->i, false);
@@ -1574,12 +1588,14 @@ void start_resync_timer_fn(unsigned long data)
1574{ 1588{
1575 struct drbd_device *device = (struct drbd_device *) data; 1589 struct drbd_device *device = (struct drbd_device *) data;
1576 1590
1577 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->start_resync_work); 1591 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
1592 &device->start_resync_work);
1578} 1593}
1579 1594
1580int w_start_resync(struct drbd_work *w, int cancel) 1595int w_start_resync(struct drbd_work *w, int cancel)
1581{ 1596{
1582 struct drbd_device *device = w->device; 1597 struct drbd_device *device =
1598 container_of(w, struct drbd_device, start_resync_work);
1583 1599
1584 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { 1600 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1585 drbd_warn(device, "w_start_resync later...\n"); 1601 drbd_warn(device, "w_start_resync later...\n");
@@ -1881,7 +1897,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
1881int drbd_worker(struct drbd_thread *thi) 1897int drbd_worker(struct drbd_thread *thi)
1882{ 1898{
1883 struct drbd_connection *connection = thi->connection; 1899 struct drbd_connection *connection = thi->connection;
1884 struct drbd_work *w = NULL; 1900 struct drbd_device_work *dw = NULL;
1885 struct drbd_peer_device *peer_device; 1901 struct drbd_peer_device *peer_device;
1886 LIST_HEAD(work_list); 1902 LIST_HEAD(work_list);
1887 int vnr; 1903 int vnr;
@@ -1907,9 +1923,9 @@ int drbd_worker(struct drbd_thread *thi)
1907 break; 1923 break;
1908 1924
1909 while (!list_empty(&work_list)) { 1925 while (!list_empty(&work_list)) {
1910 w = list_first_entry(&work_list, struct drbd_work, list); 1926 dw = list_first_entry(&work_list, struct drbd_device_work, w.list);
1911 list_del_init(&w->list); 1927 list_del_init(&dw->w.list);
1912 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) 1928 if (dw->w.cb(&dw->w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
1913 continue; 1929 continue;
1914 if (connection->cstate >= C_WF_REPORT_PARAMS) 1930 if (connection->cstate >= C_WF_REPORT_PARAMS)
1915 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); 1931 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
@@ -1918,9 +1934,9 @@ int drbd_worker(struct drbd_thread *thi)
1918 1934
1919 do { 1935 do {
1920 while (!list_empty(&work_list)) { 1936 while (!list_empty(&work_list)) {
1921 w = list_first_entry(&work_list, struct drbd_work, list); 1937 dw = list_first_entry(&work_list, struct drbd_device_work, w.list);
1922 list_del_init(&w->list); 1938 list_del_init(&dw->w.list);
1923 w->cb(w, 1); 1939 dw->w.cb(&dw->w, 1);
1924 } 1940 }
1925 dequeue_work_batch(&connection->sender_work, &work_list); 1941 dequeue_work_batch(&connection->sender_work, &work_list);
1926 } while (!list_empty(&work_list)); 1942 } while (!list_empty(&work_list));
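The drbd_worker() main loop now dequeues entries as drbd_device_work but only ever dereferences dw->w, which keeps it correct even for queued items that are not device works (update_al_work, for example, still reaches sender_work). The container_of() round-trip cancels out:

    /* list_first_entry() subtracts offsetof(struct drbd_device_work, w.list);
     * taking &dw->w adds offsetof(struct drbd_device_work, w) back. The net
     * offset is offsetof(struct drbd_work, list), i.e. exactly the pointer
     * the old list_first_entry(&work_list, struct drbd_work, list) computed. */
    dw = list_first_entry(&work_list, struct drbd_device_work, w.list);
    dw->w.cb(&dw->w, cancel);  /* cb receives the same drbd_work pointer as before */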