author		Philipp Reisner <philipp.reisner@linbit.com>	2011-01-19 08:16:30 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2011-08-29 05:30:15 -0400
commit		87eeee41f8740451b61a1e7d37a494333a906861 (patch)
tree		f7b328aa626ed5153ec5cf5b9dbd94c23676b6a8 /drivers/block/drbd/drbd_worker.c
parent		31890f4ab299c4116cf0a104ca9ce4f9ca2c5da0 (diff)
drbd: moved req_lock and transfer log from mdev to tconn
sed -i \
	-e 's/mdev->req_lock/mdev->tconn->req_lock/g' \
	-e 's/mdev->unused_spare_tle/mdev->tconn->unused_spare_tle/g' \
	-e 's/mdev->newest_tle/mdev->tconn->newest_tle/g' \
	-e 's/mdev->oldest_tle/mdev->tconn->oldest_tle/g' \
	-e 's/mdev->out_of_sequence_requests/mdev->tconn->out_of_sequence_requests/g' \
	*.[ch]

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
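For context, a minimal sketch of the structure change behind this rename (reduced
from the real drbd_int.h; field types and layout are abbreviated, not the full
definitions): the request lock and the transfer-log pointers now live on the
per-connection object, and each device reaches them through its tconn pointer.

	struct drbd_tconn {			/* one object per connection */
		spinlock_t req_lock;		/* was: mdev->req_lock */
		struct drbd_tl_epoch *unused_spare_tle;	/* transfer log state, */
		struct drbd_tl_epoch *newest_tle;	/* moved here as well */
		struct drbd_tl_epoch *oldest_tle;
		struct list_head out_of_sequence_requests;
	};

	struct drbd_conf {			/* one object per device */
		struct drbd_tconn *tconn;	/* back-pointer to the connection */
		/* ... */
	};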
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r--	drivers/block/drbd/drbd_worker.c	38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ec26df378845..671251af6bcf 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -85,14 +85,14 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	unsigned long flags = 0;
 	struct drbd_conf *mdev = e->mdev;
 
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	mdev->read_cnt += e->i.size >> 9;
 	list_del(&e->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
 		__drbd_chk_io_error(mdev, false);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	drbd_queue_work(&mdev->tconn->data.work, &e->w);
 	put_ldev(mdev);
@@ -117,7 +117,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
 	block_id = e->block_id;
 
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	mdev->writ_cnt += e->i.size >> 9;
 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
 	list_add_tail(&e->w.list, &mdev->done_ee);
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
 
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
 		__drbd_chk_io_error(mdev, false);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	if (block_id == ID_SYNCER)
 		drbd_rs_complete_io(mdev, e_sector);
@@ -220,9 +220,9 @@ void drbd_endio_pri(struct bio *bio, int error)
 	req->private_bio = ERR_PTR(error);
 
 	/* not req_mod(), we need irqsave here! */
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	__req_mod(req, what, &m);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -236,13 +236,13 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * but try to WRITE the P_DATA_REPLY to the failed location,
 	 * to give the disk the chance to relocate that block */
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
 		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		return 1;
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	return w_send_read_req(mdev, w, 0);
 }
@@ -359,9 +359,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 		goto defer;
 
 	e->w.cb = w_e_send_csum;
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add(&e->w.list, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
@@ -371,9 +371,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	 * because bio_add_page failed (probably broken lower level driver),
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	drbd_free_ee(mdev, e);
 defer:
@@ -793,7 +793,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 
 	ping_peer(mdev);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 
 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -882,7 +882,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 
 	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 out_unlock:
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	put_ldev(mdev);
 out:
 	mdev->rs_total = 0;
@@ -907,9 +907,9 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
 		int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &mdev->pp_in_use_by_net);
 		atomic_sub(i, &mdev->pp_in_use);
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		list_add_tail(&e->w.list, &mdev->net_ee);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
 		drbd_free_ee(mdev, e);
@@ -1210,10 +1210,10 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * actually, this race was harmless, since we only try to send the
 	 * barrier packet here, and otherwise do nothing with the object.
 	 * but compare with the head of w_clear_epoch */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
 		cancel = 1;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (cancel)
 		return 1;
 
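Every call site touched above follows the same pattern after the patch: the
connection-wide lock is reached through the device's tconn back-pointer. A
minimal, self-contained sketch of that pattern (example_work_done is a
hypothetical caller, not a DRBD function):

	/* Hypothetical caller illustrating the post-patch locking pattern. */
	static void example_work_done(struct drbd_conf *mdev, struct drbd_work *w)
	{
		unsigned long flags;

		/* take the lock shared by all devices of this connection */
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		list_del(&w->list);	/* list manipulation stays under the lock */
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	}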