diff options
author | Philipp Reisner <philipp.reisner@linbit.com> | 2011-01-19 07:55:45 -0500 |
---|---|---|
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2011-08-29 05:27:05 -0400 |
commit | e42325a57606396539807ff55c24febda39f8d01 (patch) | |
tree | 7bb6df217de57d3445968a082f9e2c4a7ecba659 /drivers/block/drbd/drbd_worker.c | |
parent | b2fb6dbe52dafa3cd18e0665937a0ebcc0892b92 (diff) |
drbd: moved data and meta from mdev to tconn
Patch mostly:
sed -i -e 's/mdev->data/mdev->tconn->data/g' \
-e 's/mdev->meta/mdev->tconn->meta/g' \
*.[ch]
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r-- | drivers/block/drbd/drbd_worker.c | 66 |
1 file changed, 33 insertions, 33 deletions
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d8c61816d103..9b1e2bad5fbd 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -94,7 +94,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) | |||
94 | __drbd_chk_io_error(mdev, false); | 94 | __drbd_chk_io_error(mdev, false); |
95 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 95 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
96 | 96 | ||
97 | drbd_queue_work(&mdev->data.work, &e->w); | 97 | drbd_queue_work(&mdev->tconn->data.work, &e->w); |
98 | put_ldev(mdev); | 98 | put_ldev(mdev); |
99 | } | 99 | } |
100 | 100 | ||
@@ -400,7 +400,7 @@ void resync_timer_fn(unsigned long data) | |||
400 | struct drbd_conf *mdev = (struct drbd_conf *) data; | 400 | struct drbd_conf *mdev = (struct drbd_conf *) data; |
401 | 401 | ||
402 | if (list_empty(&mdev->resync_work.list)) | 402 | if (list_empty(&mdev->resync_work.list)) |
403 | drbd_queue_work(&mdev->data.work, &mdev->resync_work); | 403 | drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work); |
404 | } | 404 | } |
405 | 405 | ||
406 | static void fifo_set(struct fifo_buffer *fb, int value) | 406 | static void fifo_set(struct fifo_buffer *fb, int value) |
@@ -538,15 +538,15 @@ static int w_make_resync_request(struct drbd_conf *mdev, | |||
538 | 538 | ||
539 | for (i = 0; i < number; i++) { | 539 | for (i = 0; i < number; i++) { |
540 | /* Stop generating RS requests, when half of the send buffer is filled */ | 540 | /* Stop generating RS requests, when half of the send buffer is filled */ |
541 | mutex_lock(&mdev->data.mutex); | 541 | mutex_lock(&mdev->tconn->data.mutex); |
542 | if (mdev->data.socket) { | 542 | if (mdev->tconn->data.socket) { |
543 | queued = mdev->data.socket->sk->sk_wmem_queued; | 543 | queued = mdev->tconn->data.socket->sk->sk_wmem_queued; |
544 | sndbuf = mdev->data.socket->sk->sk_sndbuf; | 544 | sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf; |
545 | } else { | 545 | } else { |
546 | queued = 1; | 546 | queued = 1; |
547 | sndbuf = 0; | 547 | sndbuf = 0; |
548 | } | 548 | } |
549 | mutex_unlock(&mdev->data.mutex); | 549 | mutex_unlock(&mdev->tconn->data.mutex); |
550 | if (queued > sndbuf / 2) | 550 | if (queued > sndbuf / 2) |
551 | goto requeue; | 551 | goto requeue; |
552 | 552 | ||
@@ -710,7 +710,7 @@ void start_resync_timer_fn(unsigned long data) | |||
710 | { | 710 | { |
711 | struct drbd_conf *mdev = (struct drbd_conf *) data; | 711 | struct drbd_conf *mdev = (struct drbd_conf *) data; |
712 | 712 | ||
713 | drbd_queue_work(&mdev->data.work, &mdev->start_resync_work); | 713 | drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work); |
714 | } | 714 | } |
715 | 715 | ||
716 | int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 716 | int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
@@ -775,7 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
775 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); | 775 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); |
776 | if (w) { | 776 | if (w) { |
777 | w->cb = w_resync_finished; | 777 | w->cb = w_resync_finished; |
778 | drbd_queue_work(&mdev->data.work, w); | 778 | drbd_queue_work(&mdev->tconn->data.work, w); |
779 | return 1; | 779 | return 1; |
780 | } | 780 | } |
781 | dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); | 781 | dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); |
@@ -1202,7 +1202,7 @@ int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1202 | int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 1202 | int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
1203 | { | 1203 | { |
1204 | struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w); | 1204 | struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w); |
1205 | struct p_barrier *p = &mdev->data.sbuf.barrier; | 1205 | struct p_barrier *p = &mdev->tconn->data.sbuf.barrier; |
1206 | int ok = 1; | 1206 | int ok = 1; |
1207 | 1207 | ||
1208 | /* really avoid racing with tl_clear. w.cb may have been referenced | 1208 | /* really avoid racing with tl_clear. w.cb may have been referenced |
@@ -1223,7 +1223,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1223 | /* inc_ap_pending was done where this was queued. | 1223 | /* inc_ap_pending was done where this was queued. |
1224 | * dec_ap_pending will be done in got_BarrierAck | 1224 | * dec_ap_pending will be done in got_BarrierAck |
1225 | * or (on connection loss) in w_clear_epoch. */ | 1225 | * or (on connection loss) in w_clear_epoch. */ |
1226 | ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER, | 1226 | ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER, |
1227 | (struct p_header80 *)p, sizeof(*p), 0); | 1227 | (struct p_header80 *)p, sizeof(*p), 0); |
1228 | drbd_put_data_sock(mdev); | 1228 | drbd_put_data_sock(mdev); |
1229 | 1229 | ||
@@ -1621,18 +1621,18 @@ int drbd_worker(struct drbd_thread *thi) | |||
1621 | while (get_t_state(thi) == RUNNING) { | 1621 | while (get_t_state(thi) == RUNNING) { |
1622 | drbd_thread_current_set_cpu(mdev); | 1622 | drbd_thread_current_set_cpu(mdev); |
1623 | 1623 | ||
1624 | if (down_trylock(&mdev->data.work.s)) { | 1624 | if (down_trylock(&mdev->tconn->data.work.s)) { |
1625 | mutex_lock(&mdev->data.mutex); | 1625 | mutex_lock(&mdev->tconn->data.mutex); |
1626 | if (mdev->data.socket && !mdev->tconn->net_conf->no_cork) | 1626 | if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork) |
1627 | drbd_tcp_uncork(mdev->data.socket); | 1627 | drbd_tcp_uncork(mdev->tconn->data.socket); |
1628 | mutex_unlock(&mdev->data.mutex); | 1628 | mutex_unlock(&mdev->tconn->data.mutex); |
1629 | 1629 | ||
1630 | intr = down_interruptible(&mdev->data.work.s); | 1630 | intr = down_interruptible(&mdev->tconn->data.work.s); |
1631 | 1631 | ||
1632 | mutex_lock(&mdev->data.mutex); | 1632 | mutex_lock(&mdev->tconn->data.mutex); |
1633 | if (mdev->data.socket && !mdev->tconn->net_conf->no_cork) | 1633 | if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork) |
1634 | drbd_tcp_cork(mdev->data.socket); | 1634 | drbd_tcp_cork(mdev->tconn->data.socket); |
1635 | mutex_unlock(&mdev->data.mutex); | 1635 | mutex_unlock(&mdev->tconn->data.mutex); |
1636 | } | 1636 | } |
1637 | 1637 | ||
1638 | if (intr) { | 1638 | if (intr) { |
@@ -1650,8 +1650,8 @@ int drbd_worker(struct drbd_thread *thi) | |||
1650 | this... */ | 1650 | this... */ |
1651 | 1651 | ||
1652 | w = NULL; | 1652 | w = NULL; |
1653 | spin_lock_irq(&mdev->data.work.q_lock); | 1653 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
1654 | if (!expect(!list_empty(&mdev->data.work.q))) { | 1654 | if (!expect(!list_empty(&mdev->tconn->data.work.q))) { |
1655 | /* something terribly wrong in our logic. | 1655 | /* something terribly wrong in our logic. |
1656 | * we were able to down() the semaphore, | 1656 | * we were able to down() the semaphore, |
1657 | * but the list is empty... doh. | 1657 | * but the list is empty... doh. |
@@ -1663,12 +1663,12 @@ int drbd_worker(struct drbd_thread *thi) | |||
1663 | * | 1663 | * |
1664 | * I'll try to get away just starting over this loop. | 1664 | * I'll try to get away just starting over this loop. |
1665 | */ | 1665 | */ |
1666 | spin_unlock_irq(&mdev->data.work.q_lock); | 1666 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
1667 | continue; | 1667 | continue; |
1668 | } | 1668 | } |
1669 | w = list_entry(mdev->data.work.q.next, struct drbd_work, list); | 1669 | w = list_entry(mdev->tconn->data.work.q.next, struct drbd_work, list); |
1670 | list_del_init(&w->list); | 1670 | list_del_init(&w->list); |
1671 | spin_unlock_irq(&mdev->data.work.q_lock); | 1671 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
1672 | 1672 | ||
1673 | if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) { | 1673 | if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) { |
1674 | /* dev_warn(DEV, "worker: a callback failed! \n"); */ | 1674 | /* dev_warn(DEV, "worker: a callback failed! \n"); */ |
@@ -1680,11 +1680,11 @@ int drbd_worker(struct drbd_thread *thi) | |||
1680 | D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags)); | 1680 | D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags)); |
1681 | D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags)); | 1681 | D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags)); |
1682 | 1682 | ||
1683 | spin_lock_irq(&mdev->data.work.q_lock); | 1683 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
1684 | i = 0; | 1684 | i = 0; |
1685 | while (!list_empty(&mdev->data.work.q)) { | 1685 | while (!list_empty(&mdev->tconn->data.work.q)) { |
1686 | list_splice_init(&mdev->data.work.q, &work_list); | 1686 | list_splice_init(&mdev->tconn->data.work.q, &work_list); |
1687 | spin_unlock_irq(&mdev->data.work.q_lock); | 1687 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
1688 | 1688 | ||
1689 | while (!list_empty(&work_list)) { | 1689 | while (!list_empty(&work_list)) { |
1690 | w = list_entry(work_list.next, struct drbd_work, list); | 1690 | w = list_entry(work_list.next, struct drbd_work, list); |
@@ -1693,15 +1693,15 @@ int drbd_worker(struct drbd_thread *thi) | |||
1693 | i++; /* dead debugging code */ | 1693 | i++; /* dead debugging code */ |
1694 | } | 1694 | } |
1695 | 1695 | ||
1696 | spin_lock_irq(&mdev->data.work.q_lock); | 1696 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
1697 | } | 1697 | } |
1698 | sema_init(&mdev->data.work.s, 0); | 1698 | sema_init(&mdev->tconn->data.work.s, 0); |
1699 | /* DANGEROUS race: if someone did queue his work within the spinlock, | 1699 | /* DANGEROUS race: if someone did queue his work within the spinlock, |
1700 | * but up() ed outside the spinlock, we could get an up() on the | 1700 | * but up() ed outside the spinlock, we could get an up() on the |
1701 | * semaphore without corresponding list entry. | 1701 | * semaphore without corresponding list entry. |
1702 | * So don't do that. | 1702 | * So don't do that. |
1703 | */ | 1703 | */ |
1704 | spin_unlock_irq(&mdev->data.work.q_lock); | 1704 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
1705 | 1705 | ||
1706 | D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE); | 1706 | D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE); |
1707 | /* _drbd_set_state only uses stop_nowait. | 1707 | /* _drbd_set_state only uses stop_nowait. |