diff options
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r-- | drivers/md/md.c | 72 |
1 file changed, 43 insertions(+), 29 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c index 08fcaebc61bd..0ff1bbf6c90e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
266 | * call has finished, the bio has been linked into some internal structure | 266 | * call has finished, the bio has been linked into some internal structure |
267 | * and so is visible to ->quiesce(), so we don't need the refcount any more. | 267 | * and so is visible to ->quiesce(), so we don't need the refcount any more. |
268 | */ | 268 | */ |
/*
 * Submit @bio to the array's personality via ->make_request(), blocking
 * while the array is suspended and accounting the bio in ->active_io so
 * that a suspend path waiting on ->sb_wait can drain in-flight I/O.
 */
void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		/* Sleep until the array is resumed. */
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			/* Must not hold the RCU read lock across schedule(). */
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	/* Count this bio as in-flight before leaving the RCU section. */
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		/*
		 * The personality could not accept the bio (e.g. the array
		 * was suspended meanwhile): drop our in-flight count, wake
		 * any suspend waiter, and retry from the suspended check.
		 */
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	/* Last in-flight bio on a suspended array: wake the suspend waiter. */
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);
299 | |||
269 | static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | 300 | static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) |
270 | { | 301 | { |
271 | const int rw = bio_data_dir(bio); | 302 | const int rw = bio_data_dir(bio); |
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
285 | bio_endio(bio); | 316 | bio_endio(bio); |
286 | return BLK_QC_T_NONE; | 317 | return BLK_QC_T_NONE; |
287 | } | 318 | } |
288 | check_suspended: | ||
289 | rcu_read_lock(); | ||
290 | if (mddev->suspended) { | ||
291 | DEFINE_WAIT(__wait); | ||
292 | for (;;) { | ||
293 | prepare_to_wait(&mddev->sb_wait, &__wait, | ||
294 | TASK_UNINTERRUPTIBLE); | ||
295 | if (!mddev->suspended) | ||
296 | break; | ||
297 | rcu_read_unlock(); | ||
298 | schedule(); | ||
299 | rcu_read_lock(); | ||
300 | } | ||
301 | finish_wait(&mddev->sb_wait, &__wait); | ||
302 | } | ||
303 | atomic_inc(&mddev->active_io); | ||
304 | rcu_read_unlock(); | ||
305 | 319 | ||
306 | /* | 320 | /* |
307 | * save the sectors now since our bio can | 321 | * save the sectors now since our bio can |
@@ -310,20 +324,14 @@ check_suspended: | |||
310 | sectors = bio_sectors(bio); | 324 | sectors = bio_sectors(bio); |
311 | /* bio could be mergeable after passing to underlayer */ | 325 | /* bio could be mergeable after passing to underlayer */ |
312 | bio->bi_opf &= ~REQ_NOMERGE; | 326 | bio->bi_opf &= ~REQ_NOMERGE; |
313 | if (!mddev->pers->make_request(mddev, bio)) { | 327 | |
314 | atomic_dec(&mddev->active_io); | 328 | md_handle_request(mddev, bio); |
315 | wake_up(&mddev->sb_wait); | ||
316 | goto check_suspended; | ||
317 | } | ||
318 | 329 | ||
319 | cpu = part_stat_lock(); | 330 | cpu = part_stat_lock(); |
320 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | 331 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
321 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); | 332 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); |
322 | part_stat_unlock(); | 333 | part_stat_unlock(); |
323 | 334 | ||
324 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | ||
325 | wake_up(&mddev->sb_wait); | ||
326 | |||
327 | return BLK_QC_T_NONE; | 335 | return BLK_QC_T_NONE; |
328 | } | 336 | } |
329 | 337 | ||
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
439 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); | 447 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); |
440 | struct bio *bio = mddev->flush_bio; | 448 | struct bio *bio = mddev->flush_bio; |
441 | 449 | ||
450 | /* | ||
451 | * must reset flush_bio before calling into md_handle_request to avoid a | ||
452 | * deadlock, because other bios passed md_handle_request suspend check | ||
453 | * could wait for this and below md_handle_request could wait for those | ||
454 | * bios because of suspend check | ||
455 | */ | ||
456 | mddev->flush_bio = NULL; | ||
457 | wake_up(&mddev->sb_wait); | ||
458 | |||
442 | if (bio->bi_iter.bi_size == 0) | 459 | if (bio->bi_iter.bi_size == 0) |
443 | /* an empty barrier - all done */ | 460 | /* an empty barrier - all done */ |
444 | bio_endio(bio); | 461 | bio_endio(bio); |
445 | else { | 462 | else { |
446 | bio->bi_opf &= ~REQ_PREFLUSH; | 463 | bio->bi_opf &= ~REQ_PREFLUSH; |
447 | mddev->pers->make_request(mddev, bio); | 464 | md_handle_request(mddev, bio); |
448 | } | 465 | } |
449 | |||
450 | mddev->flush_bio = NULL; | ||
451 | wake_up(&mddev->sb_wait); | ||
452 | } | 466 | } |
453 | 467 | ||
454 | void md_flush_request(struct mddev *mddev, struct bio *bio) | 468 | void md_flush_request(struct mddev *mddev, struct bio *bio) |