author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-10 18:22:42 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-10 18:22:42 -0400
commit    | 2f9e825d3e0e2b407ae8f082de5c00afcf7378fb (patch)
tree      | f8b3ee40674ce4acd5508a0a0bf52a30904caf6c /drivers/block/drbd/drbd_receiver.c
parent    | 7ae0dea900b027cd90e8a3e14deca9a19e17638b (diff)
parent    | de75d60d5ea235e6e09f4962ab22541ce0fe176a (diff)
Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
xen-blkfront: fix missing out label
blkdev: fix blkdev_issue_zeroout return value
block: update request stacking methods to support discards
block: fix missing export of blk_types.h
writeback: fix bad _bh spinlock nesting
drbd: revert "delay probes", feature is being re-implemented differently
drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
drbd: Disable delay probes for the upcomming release
writeback: cleanup bdi_register
writeback: add new tracepoints
writeback: remove unnecessary init_timer call
writeback: optimize periodic bdi thread wakeups
writeback: prevent unnecessary bdi threads wakeups
writeback: move bdi threads exiting logic to the forker thread
writeback: restructure bdi forker loop a little
writeback: move last_active to bdi
writeback: do not remove bdi from bdi_list
writeback: simplify bdi code a little
writeback: do not lose wake-ups in bdi threads
...
Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and
drivers/scsi/scsi_error.c as per Jens.
Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
-rw-r--r-- | drivers/block/drbd/drbd_receiver.c | 135
1 file changed, 29 insertions(+), 106 deletions(-)
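Most of the DRBD hunks below are mechanical fallout of the 2.6.36 block-layer flag unification pulled in by this merge: the per-bit BIO_RW_* constants, which callers had to shift into place with `1 << ...`, gave way to REQ_* flags that are already mask values. A minimal standalone sketch of the before/after pattern (the bit positions are stand-ins, not the kernel's real definitions):

```c
/* Before/after sketch of the BIO_RW_* -> REQ_* conversion; the bit
 * positions here are stand-ins, not the kernel's real definitions. */
enum { BIO_RW_SYNCIO = 3, BIO_RW_UNPLUG = 4, BIO_RW_BARRIER = 5 };

#define REQ_SYNC        (1UL << 3)
#define REQ_UNPLUG      (1UL << 4)
#define REQ_HARDBARRIER (1UL << 5)

/* old style: shift the bit number at every use site */
static unsigned long mark_sync_old(unsigned long rw)
{
	return rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
}

/* new style: the flag is already a mask, so it is used directly */
static unsigned long mark_sync_new(unsigned long rw)
{
	return rw | REQ_SYNC | REQ_UNPLUG;
}
```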
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ec1711f7c5c5..081522d3c742 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1180,7 +1180,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off BIO_RW_UNPLUG unless it is the last bio */
+		/* strip off REQ_UNPLUG unless it is the last bio */
 		if (bios)
-			bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
 
-		/* strip off BIO_RW_BARRIER,
+		/* strip off REQ_HARDBARRIER,
 		 * unless it is the first or last bio */
 		if (bios && bios->bi_next)
-			bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
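The loop above splits one peer write into a chain of bios and then adjusts flags by position: REQ_UNPLUG survives only on the last bio submitted, and REQ_HARDBARRIER only on the first and last. A standalone sketch of that stripping logic, using a mock bio struct and stand-in flag values in place of the kernel's:

```c
#include <stdio.h>

#define REQ_UNPLUG      (1UL << 4)	/* stand-in values, not the kernel's */
#define REQ_HARDBARRIER (1UL << 5)

struct mock_bio {
	unsigned long bi_rw;
	struct mock_bio *bi_next;
};

/* Mirrors the drbd submit loop: pop each bio off the chain, strip
 * REQ_UNPLUG from all but the last, and strip REQ_HARDBARRIER from
 * every bio that is neither first nor last. */
static void submit_chain(struct mock_bio *bios)
{
	do {
		struct mock_bio *bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		if (bios)			/* not the last bio */
			bio->bi_rw &= ~REQ_UNPLUG;

		printf("submit bi_rw=%#lx\n", bio->bi_rw);

		if (bios && bios->bi_next)	/* next bio is a middle one */
			bios->bi_rw &= ~REQ_HARDBARRIER;
	} while (bios);
}

int main(void)
{
	struct mock_bio b3 = { REQ_UNPLUG | REQ_HARDBARRIER, NULL };
	struct mock_bio b2 = { REQ_UNPLUG | REQ_HARDBARRIER, &b3 };
	struct mock_bio b1 = { REQ_UNPLUG | REQ_HARDBARRIER, &b2 };

	/* prints: the first bio keeps only the barrier, the middle bio
	 * loses both flags, the last keeps REQ_UNPLUG | REQ_HARDBARRIER */
	submit_chain(&b1);
	return 0;
}
```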
@@ -1233,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:	DRBD device.
  * @w:		work object.
  * @cancel:	The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   so that we can finish that epoch in drbd_may_finish_epoch().
 	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that BIO_RW_BARRIER is actually not supported */
+	   we realize that REQ_HARDBARRIER is actually not supported */
 
 	/* As long as the -ENOTSUPP on the barrier is reported immediately
 	   that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
 		if (epoch == e->epoch) {
 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= (1<<BIO_RW_BARRIER);
+			rw |= REQ_HARDBARRIER;
 			e->flags |= EE_IS_BARRIER;
 		} else {
 			if (atomic_read(&epoch->epoch_size) > 1 ||
 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= (1<<BIO_RW_BARRIER);
+				rw |= REQ_HARDBARRIER;
 				e->flags |= EE_IS_BARRIER;
 			}
 		}
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 	dp_flags = be32_to_cpu(p->dp_flags);
 	if (dp_flags & DP_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-		/* rw |= (1<<BIO_RW_BARRIER); */
+		/* rw |= REQ_HARDBARRIER; */
 	}
 	if (dp_flags & DP_RW_SYNC)
-		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		e->flags |= EE_MAY_SET_IN_SYNC;
 
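receive_Data() also translates the peer's wire-level dp_flags into local submission flags; after the flag unification the DP_RW_SYNC case maps straight onto REQ_SYNC | REQ_UNPLUG. A rough sketch of that decode step, using ntohl() as a userspace stand-in for be32_to_cpu() and made-up flag values:

```c
#include <arpa/inet.h>	/* ntohl(), standing in for the kernel's be32_to_cpu() */

#define DP_RW_SYNC	2U		/* wire flag: stand-in value */

#define REQ_SYNC	(1UL << 3)	/* local flags: stand-in values */
#define REQ_UNPLUG	(1UL << 4)

/* Decode a big-endian flags word from the packet and fold the
 * sync hint into the local request flags. */
static unsigned long decode_dp_flags(unsigned int dp_flags_be, unsigned long rw)
{
	unsigned int dp_flags = ntohl(dp_flags_be);

	if (dp_flags & DP_RW_SYNC)
		rw |= REQ_SYNC | REQ_UNPLUG;
	return rw;
}
```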
@@ -3555,14 +3555,15 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
 	return ok;
 }
 
-static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
+static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
 {
 	/* TODO zero copy sink :) */
 	static char sink[128];
 	int size, want, r;
 
-	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
-		 h->command, h->length);
+	if (!silent)
+		dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
+			 h->command, h->length);
 
 	size = h->length;
 	while (size > 0) {
@@ -3574,101 +3575,25 @@ static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
 	return size == 0;
 }
 
-static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
-{
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
-	/* Make sure we've acked all the TCP data associated
-	 * with the data requests being unplugged */
-	drbd_tcp_quickack(mdev->data.socket);
-
-	return TRUE;
-}
-
-static void timeval_sub_us(struct timeval* tv, unsigned int us)
+static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
 {
-	tv->tv_sec -= us / 1000000;
-	us = us % 1000000;
-	if (tv->tv_usec > us) {
-		tv->tv_usec += 1000000;
-		tv->tv_sec--;
-	}
-	tv->tv_usec -= us;
+	return receive_skip_(mdev, h, 0);
 }
 
-static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
 {
-	struct delay_probe *dp;
-	struct list_head *le;
-	struct timeval now;
-	int seq_num;
-	int offset;
-	int data_delay;
-
-	seq_num = be32_to_cpu(p->seq_num);
-	offset = be32_to_cpu(p->offset);
-
-	spin_lock(&mdev->peer_seq_lock);
-	if (!list_empty(&mdev->delay_probes)) {
-		if (from == USE_DATA_SOCKET)
-			le = mdev->delay_probes.next;
-		else
-			le = mdev->delay_probes.prev;
-
-		dp = list_entry(le, struct delay_probe, list);
-
-		if (dp->seq_num == seq_num) {
-			list_del(le);
-			spin_unlock(&mdev->peer_seq_lock);
-			do_gettimeofday(&now);
-			timeval_sub_us(&now, offset);
-			data_delay =
-				now.tv_usec - dp->time.tv_usec +
-				(now.tv_sec - dp->time.tv_sec) * 1000000;
-
-			if (data_delay > 0)
-				mdev->data_delay = data_delay;
-
-			kfree(dp);
-			return;
-		}
-
-		if (dp->seq_num > seq_num) {
-			spin_unlock(&mdev->peer_seq_lock);
-			dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
-			return; /* Do not alloca a struct delay_probe.... */
-		}
-	}
-	spin_unlock(&mdev->peer_seq_lock);
-
-	dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
-	if (!dp) {
-		dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
-		return;
-	}
-
-	dp->seq_num = seq_num;
-	do_gettimeofday(&dp->time);
-	timeval_sub_us(&dp->time, offset);
-
-	spin_lock(&mdev->peer_seq_lock);
-	if (from == USE_DATA_SOCKET)
-		list_add(&dp->list, &mdev->delay_probes);
-	else
-		list_add_tail(&dp->list, &mdev->delay_probes);
-	spin_unlock(&mdev->peer_seq_lock);
+	return receive_skip_(mdev, h, 1);
 }
 
-static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
 {
-	struct p_delay_probe *p = (struct p_delay_probe *)h;
+	if (mdev->state.disk >= D_INCONSISTENT)
+		drbd_kick_lo(mdev);
 
-	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
-	if (drbd_recv(mdev, h->payload, h->length) != h->length)
-		return FALSE;
+	/* Make sure we've acked all the TCP data associated
+	 * with the data requests being unplugged */
+	drbd_tcp_quickack(mdev->data.socket);
 
-	got_delay_probe(mdev, USE_DATA_SOCKET, p);
 	return TRUE;
 }
 
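With the delay-probe machinery reverted, what remains of this hunk is a small wrapper pattern: receive_skip() and receive_skip_silent() both funnel into receive_skip_(), differing only in whether the unknown-packet warning is printed. A simplified standalone sketch (DRBD's socket draining and types are elided or replaced with stand-ins):

```c
#include <stdio.h>

struct header {	/* simplified stand-in for drbd's p_header */
	int command;
	int length;
};

static int receive_skip_(struct header *h, int silent)
{
	if (!silent)
		fprintf(stderr, "skipping unknown optional packet type %d, l: %d!\n",
			h->command, h->length);
	/* ... the real function drains h->length payload bytes here ... */
	return 1;	/* TRUE */
}

static int receive_skip(struct header *h)        { return receive_skip_(h, 0); }
static int receive_skip_silent(struct header *h) { return receive_skip_(h, 1); }
```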
@@ -3695,7 +3620,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
 	[P_OV_REQUEST]      = receive_DataRequest,
 	[P_OV_REPLY]        = receive_DataRequest,
 	[P_CSUM_RS_REQUEST] = receive_DataRequest,
-	[P_DELAY_PROBE]     = receive_delay_probe,
+	[P_DELAY_PROBE]     = receive_skip_silent,
 	/* anything missing from this table is in
 	 * the asender_tbl, see get_asender_cmd */
 	[P_MAX_CMD]         = NULL,
@@ -4472,11 +4397,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
 	return TRUE;
 }
 
-static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
 {
-	struct p_delay_probe *p = (struct p_delay_probe *)h;
-
-	got_delay_probe(mdev, USE_META_SOCKET, p);
+	/* IGNORE */
 	return TRUE;
 }
 
@@ -4504,7 +4427,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
 	[P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
-	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
+	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
 	[P_MAX_CMD]         = { 0, NULL },
 	};
 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
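Note that both dispatch tables keep their P_DELAY_PROBE slot even though the feature is gone: an older peer may still send probes, so those packets are drained or ignored rather than falling through to the unknown-command error path. A minimal sketch of this designated-initializer dispatch pattern (the types and packet sizes are stand-ins, not DRBD's):

```c
#include <stddef.h>

enum packet { P_BARRIER_ACK, P_DELAY_PROBE, P_MAX_CMD };

struct header { enum packet command; int length; };

static int got_BarrierAck(struct header *h)            { (void)h; return 1; }
static int got_something_to_ignore_m(struct header *h) { (void)h; return 1; } /* IGNORE */

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct header *);
};

/* Designated initializers keyed by packet type; missing entries default
 * to { 0, NULL } and are rejected by the lookup below. */
static struct asender_cmd asender_tbl[] = {
	[P_BARRIER_ACK] = { sizeof(struct header), got_BarrierAck },
	[P_DELAY_PROBE] = { sizeof(struct header), got_something_to_ignore_m },
	[P_MAX_CMD]     = { 0, NULL },
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
```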