Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/md/raid5-cache.c  40
 1 file changed, 31 insertions, 9 deletions
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ce98414a6e34..2dcbafa8e66c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
         bool need_split_bio;
         struct bio *split_bio;

         unsigned int has_flush:1;               /* include flush request */
         unsigned int has_fua:1;                 /* include fua request */
-        unsigned int has_null_flush:1;          /* include empty flush request */
+        unsigned int has_null_flush:1;          /* include null flush request */
+        unsigned int has_flush_payload:1;       /* include flush payload */
         /*
          * io isn't sent yet, flush/fua request can only be submitted till it's
          * the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
         struct r5l_io_unit *io_deferred;
         struct r5l_log *log = io->log;
         unsigned long flags;
+        bool has_null_flush;
+        bool has_flush_payload;

         if (bio->bi_status)
                 md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)

         spin_lock_irqsave(&log->io_list_lock, flags);
         __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+        /*
+         * if the io doesn't have null_flush or flush payload,
+         * it is not safe to access it after releasing io_list_lock.
+         * Therefore, it is necessary to check the condition with
+         * the lock held.
+         */
+        has_null_flush = io->has_null_flush;
+        has_flush_payload = io->has_flush_payload;
+
         if (log->need_cache_flush && !list_empty(&io->stripe_list))
                 r5l_move_to_end_ios(log);
         else
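Note on the hunk above: the flags are copied into locals while io_list_lock is still held because, once the io_unit is marked IO_UNIT_IO_END and the lock is dropped, another context may complete and free it; only when has_null_flush or has_flush_payload is set does a remaining pending_stripe reference keep it alive. A minimal userspace sketch of this snapshot-under-lock pattern, with hypothetical names and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>

struct io_unit {                        /* hypothetical stand-in for r5l_io_unit */
        pthread_mutex_t list_lock;      /* stands in for log->io_list_lock */
        bool has_null_flush;
        bool has_flush_payload;
};

static void end_io(struct io_unit *io)
{
        bool null_flush, flush_payload;

        /* snapshot the flags while the lock still guarantees io is alive */
        pthread_mutex_lock(&io->list_lock);
        null_flush = io->has_null_flush;
        flush_payload = io->has_flush_payload;
        pthread_mutex_unlock(&io->list_lock);

        /* after unlock, decide from the local copies only; in the real
         * pattern the object may already have been freed by another thread */
        if (!null_flush && !flush_payload)
                return;
        /* ... otherwise a pending reference still pins io ... */
}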
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
         if (log->need_cache_flush)
                 md_wakeup_thread(log->rdev->mddev->thread);

-        if (io->has_null_flush) {
+        /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+        if (has_null_flush) {
                 struct bio *bi;

                 WARN_ON(bio_list_empty(&io->flush_barriers));
                 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
                         bio_endio(bi);
-                        atomic_dec(&io->pending_stripe);
+                        if (atomic_dec_and_test(&io->pending_stripe)) {
+                                __r5l_stripe_write_finished(io);
+                                return;
+                        }
                 }
         }
-
-        /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
-        if (atomic_read(&io->pending_stripe) == 0)
-                __r5l_stripe_write_finished(io);
+        /* decrease pending_stripe for flush payload */
+        if (has_flush_payload)
+                if (atomic_dec_and_test(&io->pending_stripe))
+                        __r5l_stripe_write_finished(io);
 }

 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
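Note on the hunk above: it replaces "atomic_dec() now, check atomic_read() == 0 later" with atomic_dec_and_test(), so each completed flush barrier (and, below, the flush-payload reference) drops exactly one reference, and only the caller that drops the last one calls __r5l_stripe_write_finished() and then stops touching the io_unit. The old sequence was risky because the count could reach zero partway through the loop, after which the io_unit could be freed elsewhere while this function still dereferenced it. A short, self-contained C11 sketch of the drop-and-test idiom (hypothetical names; atomic_fetch_sub() plays the role of the kernel's atomic_dec_and_test()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_unit {
        atomic_int pending_stripe;      /* outstanding references on the io_unit */
};

static void io_unit_finished(struct io_unit *io)
{
        printf("last reference dropped, io_unit can be freed\n");
        free(io);
}

/* drop one reference; only the dropper of the last one runs teardown */
static void io_unit_put(struct io_unit *io)
{
        if (atomic_fetch_sub(&io->pending_stripe, 1) == 1)
                io_unit_finished(io);
        /* whether or not we freed it, io must not be used after this point */
}

int main(void)
{
        struct io_unit *io = malloc(sizeof(*io));

        if (!io)
                return 1;
        atomic_init(&io->pending_stripe, 2);
        io_unit_put(io);        /* e.g. a flush barrier completed */
        io_unit_put(io);        /* e.g. the flush-payload reference: frees io */
        return 0;
}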
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
         payload->size = cpu_to_le32(sizeof(__le64));
         payload->flush_stripes[0] = cpu_to_le64(sect);
         io->meta_offset += meta_size;
+        /* multiple flush payloads count as one pending_stripe */
+        if (!io->has_flush_payload) {
+                io->has_flush_payload = 1;
+                atomic_inc(&io->pending_stripe);
+        }
         mutex_unlock(&log->io_mutex);
 }

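Note on the hunk above: r5l_append_flush_payload() can run many times for one io_unit (one flush payload per flushed stripe), so the pending_stripe reference is taken only when the first payload is appended; the matching single drop is the has_flush_payload branch added to r5l_log_endio(). A small sketch of this "first user pins the object once" idea, again with hypothetical names and C11 atomics in place of the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>

struct io_unit {
        atomic_int pending_stripe;
        bool has_flush_payload;         /* serialized by the caller's io mutex */
};

/* called with the io mutex held, once per flush payload appended */
static void append_flush_payload(struct io_unit *io)
{
        /* ... write the payload into the metadata block ... */

        /* all flush payloads on this io_unit share a single reference */
        if (!io->has_flush_payload) {
                io->has_flush_payload = true;
                atomic_fetch_add(&io->pending_stripe, 1);
        }
}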
