about summary refs log tree commit diff stats
path: root/drivers/block/drbd
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--drivers/block/drbd/drbd_actlog.c49
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_req.c70
3 files changed, 107 insertions, 14 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ff03f9053316..6afe173d5c2b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -359,6 +359,55 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool dele
359 drbd_al_begin_io_commit(mdev, delegate); 359 drbd_al_begin_io_commit(mdev, delegate);
360} 360}
361 361
362int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
363{
364 struct lru_cache *al = mdev->act_log;
365 /* for bios crossing activity log extent boundaries,
366 * we may need to activate two extents in one go */
367 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
368 unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
369 unsigned nr_al_extents;
370 unsigned available_update_slots;
371 unsigned enr;
372
373 D_ASSERT(first <= last);
374
375 nr_al_extents = 1 + last - first; /* worst case: all touched extends are cold. */
376 available_update_slots = min(al->nr_elements - al->used,
377 al->max_pending_changes - al->pending_changes);
378
379 /* We want all necessary updates for a given request within the same transaction
380 * We could first check how many updates are *actually* needed,
381 * and use that instead of the worst-case nr_al_extents */
382 if (available_update_slots < nr_al_extents)
383 return -EWOULDBLOCK;
384
385 /* Is resync active in this area? */
386 for (enr = first; enr <= last; enr++) {
387 struct lc_element *tmp;
388 tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
389 if (unlikely(tmp != NULL)) {
390 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
391 if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
 392 if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
393 return -EBUSY;
394 return -EWOULDBLOCK;
395 }
396 }
397 }
398
399 /* Checkout the refcounts.
400 * Given that we checked for available elements and update slots above,
401 * this has to be successful. */
402 for (enr = first; enr <= last; enr++) {
403 struct lc_element *al_ext;
404 al_ext = lc_get_cumulative(mdev->act_log, enr);
405 if (!al_ext)
406 dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
407 }
408 return 0;
409}
410
362void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i) 411void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
363{ 412{
364 /* for bios crossing activity log extent boundaries, 413 /* for bios crossing activity log extent boundaries,
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b7b52dd42325..f943aacfdad8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1611,6 +1611,8 @@ extern const char *drbd_conn_str(enum drbd_conns s);
1611extern const char *drbd_role_str(enum drbd_role s); 1611extern const char *drbd_role_str(enum drbd_role s);
1612 1612
1613/* drbd_actlog.c */ 1613/* drbd_actlog.c */
1614extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
1615extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
1614extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i); 1616extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
1615extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate); 1617extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
1616extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i); 1618extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 43bc1d064bc7..b923d41678e1 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1164,32 +1164,74 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
1164 drbd_send_and_submit(mdev, req); 1164 drbd_send_and_submit(mdev, req);
1165} 1165}
1166 1166
1167void __drbd_make_request_from_worker(struct drbd_conf *mdev, struct drbd_request *req) 1167static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
1168{ 1168{
1169 const int rw = bio_rw(req->master_bio); 1169 struct drbd_request *req, *tmp;
1170 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1171 const int rw = bio_data_dir(req->master_bio);
1170 1172
1171 if (rw == WRITE && req->private_bio && req->i.size 1173 if (rw == WRITE /* rw != WRITE should not even end up here! */
1172 && !test_bit(AL_SUSPENDED, &mdev->flags)) { 1174 && req->private_bio && req->i.size
1173 drbd_al_begin_io(mdev, &req->i, false); 1175 && !test_bit(AL_SUSPENDED, &mdev->flags)) {
1174 req->rq_state |= RQ_IN_ACT_LOG; 1176 if (!drbd_al_begin_io_fastpath(mdev, &req->i))
1177 continue;
1178
1179 req->rq_state |= RQ_IN_ACT_LOG;
1180 }
1181
1182 list_del_init(&req->tl_requests);
1183 drbd_send_and_submit(mdev, req);
1175 } 1184 }
1176 drbd_send_and_submit(mdev, req);
1177} 1185}
1178 1186
1187static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
1188 struct list_head *incoming,
1189 struct list_head *pending)
1190{
1191 struct drbd_request *req, *tmp;
1192 int wake = 0;
1193 int err;
1194
1195 spin_lock_irq(&mdev->al_lock);
1196 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1197 err = drbd_al_begin_io_nonblock(mdev, &req->i);
1198 if (err == -EBUSY)
1199 wake = 1;
1200 if (err)
1201 continue;
1202 req->rq_state |= RQ_IN_ACT_LOG;
1203 list_move_tail(&req->tl_requests, pending);
1204 }
1205 spin_unlock_irq(&mdev->al_lock);
1206 if (wake)
1207 wake_up(&mdev->al_wait);
1208
1209 return !list_empty(pending);
1210}
1179 1211
1180void do_submit(struct work_struct *ws) 1212void do_submit(struct work_struct *ws)
1181{ 1213{
1182 struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker); 1214 struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
1183 LIST_HEAD(writes); 1215 LIST_HEAD(incoming);
1216 LIST_HEAD(pending);
1184 struct drbd_request *req, *tmp; 1217 struct drbd_request *req, *tmp;
1185 1218
1186 spin_lock(&mdev->submit.lock); 1219 for (;;) {
1187 list_splice_init(&mdev->submit.writes, &writes); 1220 spin_lock(&mdev->submit.lock);
1188 spin_unlock(&mdev->submit.lock); 1221 list_splice_tail_init(&mdev->submit.writes, &incoming);
1222 spin_unlock(&mdev->submit.lock);
1189 1223
1190 list_for_each_entry_safe(req, tmp, &writes, tl_requests) { 1224 submit_fast_path(mdev, &incoming);
1191 list_del_init(&req->tl_requests); 1225 if (list_empty(&incoming))
1192 __drbd_make_request_from_worker(mdev, req); 1226 break;
1227
1228 wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
1229 drbd_al_begin_io_commit(mdev, false);
1230
1231 list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
1232 list_del_init(&req->tl_requests);
1233 drbd_send_and_submit(mdev, req);
1234 }
1193 } 1235 }
1194} 1236}
1195 1237