Diffstat (limited to 'drivers/md/raid5-cache.c')
 drivers/md/raid5-cache.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 15 deletions(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..2dcbafa8e66c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
 	bool need_split_bio;
 	struct bio *split_bio;
 
 	unsigned int has_flush:1;		/* include flush request */
 	unsigned int has_fua:1;			/* include fua request */
-	unsigned int has_null_flush:1;		/* include empty flush request */
+	unsigned int has_null_flush:1;		/* include null flush request */
+	unsigned int has_flush_payload:1;	/* include flush payload */
 	/*
 	 * io isn't sent yet, flush/fua request can only be submitted till it's
 	 * the first IO in running_ios list
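
Note: the new has_flush_payload bit joins three existing one-bit flags. A minimal user-space sketch (not kernel code) of why the :1 bitfield style is cheap here: all four flags pack into a single unsigned int, so the new flag costs no extra storage.

#include <stdio.h>

struct flags {
	unsigned int has_flush:1;
	unsigned int has_fua:1;
	unsigned int has_null_flush:1;
	unsigned int has_flush_payload:1;
};

int main(void)
{
	/* typically prints 4: one unsigned int holds all four bits */
	printf("sizeof(struct flags) = %zu\n", sizeof(struct flags));
	return 0;
}
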
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
 	struct r5l_io_unit *io_deferred;
 	struct r5l_log *log = io->log;
 	unsigned long flags;
+	bool has_null_flush;
+	bool has_flush_payload;
 
 	if (bio->bi_status)
 		md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
 
 	spin_lock_irqsave(&log->io_list_lock, flags);
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+	/*
+	 * if the io doesn't have null_flush or flush payload,
+	 * it is not safe to access it after releasing io_list_lock.
+	 * Therefore, it is necessary to check the condition with
+	 * the lock held.
+	 */
+	has_null_flush = io->has_null_flush;
+	has_flush_payload = io->has_flush_payload;
+
 	if (log->need_cache_flush && !list_empty(&io->stripe_list))
 		r5l_move_to_end_ios(log);
 	else
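
Note: the hunk above snapshots the two flags while io_list_lock is held, because once the lock drops the io_unit may be freed by a competing completion path. A minimal sketch of that snapshot-under-lock pattern, with pthreads standing in for the kernel spinlock API (an assumed simplification, not the real r5l code):

#include <pthread.h>
#include <stdbool.h>

struct io_unit {
	bool has_null_flush;
	bool has_flush_payload;
};

static pthread_mutex_t io_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void end_io(struct io_unit *io)
{
	bool has_null_flush, has_flush_payload;

	pthread_mutex_lock(&io_list_lock);
	/* io may become invalid after the unlock below ... */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;
	pthread_mutex_unlock(&io_list_lock);

	/* ... so from here on, only the local copies are consulted */
	if (has_null_flush || has_flush_payload) {
		/* completion work for flush-carrying io_units goes here */
	}
}
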
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
 	if (log->need_cache_flush)
 		md_wakeup_thread(log->rdev->mddev->thread);
 
-	if (io->has_null_flush) {
+	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+	if (has_null_flush) {
 		struct bio *bi;
 
 		WARN_ON(bio_list_empty(&io->flush_barriers));
 		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
 			bio_endio(bi);
-			atomic_dec(&io->pending_stripe);
+			if (atomic_dec_and_test(&io->pending_stripe)) {
+				__r5l_stripe_write_finished(io);
+				return;
+			}
 		}
 	}
-
-	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
-	if (atomic_read(&io->pending_stripe) == 0)
-		__r5l_stripe_write_finished(io);
+	/* decrease pending_stripe for flush payload */
+	if (has_flush_payload)
+		if (atomic_dec_and_test(&io->pending_stripe))
+			__r5l_stripe_write_finished(io);
 }
 
 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
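
Note: the switch from atomic_dec() plus a later atomic_read() to atomic_dec_and_test() closes a race: with the old code, the decrement and the zero check were separate steps, so two contexts could both (or neither) observe a count of zero and double-finish or leak the io_unit. atomic_dec_and_test() fuses the two, so exactly one context sees the count reach zero. A C11 sketch of the idiom (an assumed analogue, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

static bool dec_and_test(atomic_int *v)
{
	/* fetch_sub returns the pre-decrement value; old == 1 means we hit zero */
	return atomic_fetch_sub(v, 1) == 1;
}

static void put_io_unit(atomic_int *pending, void (*finish)(void))
{
	if (dec_and_test(pending))
		finish();	/* runs in exactly one context */
}
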
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
 	payload->size = cpu_to_le32(sizeof(__le64));
 	payload->flush_stripes[0] = cpu_to_le64(sect);
 	io->meta_offset += meta_size;
+	/* multiple flush payloads count as one pending_stripe */
+	if (!io->has_flush_payload) {
+		io->has_flush_payload = 1;
+		atomic_inc(&io->pending_stripe);
+	}
 	mutex_unlock(&log->io_mutex);
 }
 
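Note: here the first flush payload latches has_flush_payload and takes a single pending_stripe reference that covers every later payload; the matching atomic_dec_and_test() in r5l_log_endio() drops it once. In the kernel the append side runs under log->io_mutex, which is what makes the plain flag test safe. A plain-C11 sketch of the pairing (an assumed analogue of the kernel atomics, not the md API):

#include <stdatomic.h>
#include <stdbool.h>

struct io_unit {
	bool has_flush_payload;		/* guarded by an outer mutex */
	atomic_int pending_stripe;
};

static void append_flush_payload(struct io_unit *io)
{
	/* first payload only: one reference covers all later payloads */
	if (!io->has_flush_payload) {
		io->has_flush_payload = true;
		atomic_fetch_add(&io->pending_stripe, 1);
	}
}

static void log_endio(struct io_unit *io, void (*finish)(struct io_unit *))
{
	/* drop the batch reference exactly once */
	if (io->has_flush_payload &&
	    atomic_fetch_sub(&io->pending_stripe, 1) == 1)
		finish(io);
}
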
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
  */
 int r5c_journal_mode_set(struct mddev *mddev, int mode)
 {
-	struct r5conf *conf = mddev->private;
-	struct r5l_log *log = conf->log;
-
-	if (!log)
-		return -ENODEV;
+	struct r5conf *conf;
+	int err;
 
 	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
 	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
 		return -EINVAL;
 
+	err = mddev_lock(mddev);
+	if (err)
+		return err;
+	conf = mddev->private;
+	if (!conf || !conf->log) {
+		mddev_unlock(mddev);
+		return -ENODEV;
+	}
+
 	if (raid5_calc_degraded(conf) > 0 &&
-	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
+	    mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+		mddev_unlock(mddev);
 		return -EINVAL;
+	}
 
 	mddev_suspend(mddev);
 	conf->log->r5c_journal_mode = mode;
 	mddev_resume(mddev);
+	mddev_unlock(mddev);
 
 	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
 		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
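
Note: the last hunk reshapes r5c_journal_mode_set() so that mddev->private is only dereferenced under mddev_lock(), and the lock is released on every path out, including both error returns. A sketch of that shape (an assumed simplification using pthreads, not the md API):

#include <errno.h>
#include <stddef.h>
#include <pthread.h>

struct dev {
	pthread_mutex_t lock;
	void *private;		/* may go away during reconfiguration */
};

static int set_mode(struct dev *d, int mode)
{
	int err = 0;

	if (mode < 0 || mode > 1)	/* pure input check: no lock needed */
		return -EINVAL;

	pthread_mutex_lock(&d->lock);
	if (!d->private) {		/* re-check device state under the lock */
		err = -ENODEV;
		goto out;
	}
	/* apply the mode change here, with d->private pinned by the lock */
out:
	pthread_mutex_unlock(&d->lock);
	return err;
}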