author	Artur Paszkiewicz <artur.paszkiewicz@intel.com>	2017-04-04 07:13:56 -0400
committer	Shaohua Li <shli@fb.com>	2017-04-10 15:00:27 -0400
commit	94568f64af50bb37c418b200449698cfe7e1da5f (patch)
tree	0de940594ff6b9151b57e06cb66c13e51eaa6f49 /drivers/md/raid5-ppl.c
parent	0c9d5b127f695818c2c5a3868c1f28ca2969e905 (diff)
raid5-ppl: move no_mem_stripes to struct ppl_conf
Use a single no_mem_stripes list instead of per member device lists for
handling stripes that need retrying in case of failed io_unit
allocation. Because io_units are allocated from a memory pool shared
between all member disks, the no_mem_stripes list should be checked when
an io_unit for any member is freed. This fixes a deadlock that could
happen if there are stripes in more than one no_mem_stripes list.
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Signed-off-by: Shaohua Li <shli@fb.com>
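
The crux of the fix: io_units come from a single mempool shared by all member devices, so stripes deferred after a failed allocation must sit on a single shared retry list as well; freeing an io_unit for any member can then unblock any waiting stripe. Below is a minimal userspace sketch of that pattern, not the raid5-ppl.c code itself; the names (io_unit pool, retry_list, pool_alloc, pool_free, defer_stripe) are hypothetical, and pthread mutexes stand in for kernel spinlocks.

/*
 * Simplified model of the commit's pattern: one fixed-size io_unit pool
 * shared by all members, one shared retry list checked on every free.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

struct io_unit { bool in_use; };
struct stripe  { struct stripe *next; int id; };

static struct io_unit pool[POOL_SIZE];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Single retry list for all member devices, with its own lock. */
static struct stripe *retry_list;
static pthread_mutex_t retry_lock = PTHREAD_MUTEX_INITIALIZER;

static struct io_unit *pool_alloc(void)
{
	struct io_unit *io = NULL;

	pthread_mutex_lock(&pool_lock);
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!pool[i].in_use) {
			pool[i].in_use = true;
			io = &pool[i];
			break;
		}
	}
	pthread_mutex_unlock(&pool_lock);
	return io;	/* NULL when the pool is exhausted */
}

/* Allocation failed: park the stripe on the shared retry list. */
static void defer_stripe(struct stripe *sh)
{
	pthread_mutex_lock(&retry_lock);
	sh->next = retry_list;
	retry_list = sh;
	pthread_mutex_unlock(&retry_lock);
}

/*
 * Freeing an io_unit for *any* member makes pool space available, so
 * retry one deferred stripe regardless of which member it targets.
 */
static void pool_free(struct io_unit *io)
{
	struct stripe *sh = NULL;

	pthread_mutex_lock(&pool_lock);
	io->in_use = false;
	pthread_mutex_unlock(&pool_lock);

	pthread_mutex_lock(&retry_lock);
	if (retry_list) {
		sh = retry_list;
		retry_list = sh->next;
	}
	pthread_mutex_unlock(&retry_lock);

	if (sh)
		printf("retrying stripe %d\n", sh->id);
}

int main(void)
{
	struct stripe sh = { .next = NULL, .id = 1 };
	struct io_unit *io[POOL_SIZE + 1];

	for (int i = 0; i <= POOL_SIZE; i++)
		io[i] = pool_alloc();	/* the last allocation fails */

	if (!io[POOL_SIZE])
		defer_stripe(&sh);

	pool_free(io[0]);	/* frees pool space and retries stripe 1 */
	return 0;
}

With per-device retry lists, a free on one device would only recheck that device's own list, so stripes parked for another device could wait indefinitely once the shared pool was exhausted, which is the deadlock the commit message describes.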
Diffstat (limited to 'drivers/md/raid5-ppl.c')
-rw-r--r--	drivers/md/raid5-ppl.c	36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 86ea9addb51a..355cf3581ef8 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -107,6 +107,10 @@ struct ppl_conf {
 	/* used only for recovery */
 	int recovered_entries;
 	int mismatch_count;
+
+	/* stripes to retry if failed to allocate io_unit */
+	struct list_head no_mem_stripes;
+	spinlock_t no_mem_stripes_lock;
 };
 
 struct ppl_log {
@@ -119,8 +123,6 @@ struct ppl_log {
 	 * always at the end of io_list */
 	spinlock_t io_list_lock;
 	struct list_head io_list;	/* all io_units of this log */
-	struct list_head no_mem_stripes;/* stripes to retry if failed to
-					 * allocate io_unit */
 };
 
 #define PPL_IO_INLINE_BVECS 32
@@ -347,9 +349,9 @@ int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
 	atomic_inc(&sh->count);
 
 	if (ppl_log_stripe(log, sh)) {
-		spin_lock_irq(&log->io_list_lock);
-		list_add_tail(&sh->log_list, &log->no_mem_stripes);
-		spin_unlock_irq(&log->io_list_lock);
+		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
+		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
+		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
 	}
 
 	mutex_unlock(&log->io_mutex);
@@ -492,25 +494,32 @@ void ppl_write_stripe_run(struct r5conf *conf)
 static void ppl_io_unit_finished(struct ppl_io_unit *io)
 {
 	struct ppl_log *log = io->log;
+	struct ppl_conf *ppl_conf = log->ppl_conf;
 	unsigned long flags;
 
 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
 
-	spin_lock_irqsave(&log->io_list_lock, flags);
+	local_irq_save(flags);
 
+	spin_lock(&log->io_list_lock);
 	list_del(&io->log_sibling);
-	mempool_free(io, log->ppl_conf->io_pool);
+	spin_unlock(&log->io_list_lock);
+
+	mempool_free(io, ppl_conf->io_pool);
+
+	spin_lock(&ppl_conf->no_mem_stripes_lock);
+	if (!list_empty(&ppl_conf->no_mem_stripes)) {
+		struct stripe_head *sh;
 
-	if (!list_empty(&log->no_mem_stripes)) {
-		struct stripe_head *sh = list_first_entry(&log->no_mem_stripes,
-							  struct stripe_head,
-							  log_list);
+		sh = list_first_entry(&ppl_conf->no_mem_stripes,
+				      struct stripe_head, log_list);
 		list_del_init(&sh->log_list);
 		set_bit(STRIPE_HANDLE, &sh->state);
 		raid5_release_stripe(sh);
 	}
+	spin_unlock(&ppl_conf->no_mem_stripes_lock);
 
-	spin_unlock_irqrestore(&log->io_list_lock, flags);
+	local_irq_restore(flags);
 }
 
 void ppl_stripe_write_finished(struct stripe_head *sh)
@@ -1135,6 +1144,8 @@ int ppl_init_log(struct r5conf *conf)
 	}
 
 	atomic64_set(&ppl_conf->seq, 0);
+	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
+	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
 
 	if (!mddev->external) {
 		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
@@ -1150,7 +1161,6 @@ int ppl_init_log(struct r5conf *conf)
 	mutex_init(&log->io_mutex);
 	spin_lock_init(&log->io_list_lock);
 	INIT_LIST_HEAD(&log->io_list);
-	INIT_LIST_HEAD(&log->no_mem_stripes);
 
 	log->ppl_conf = ppl_conf;
 	log->rdev = rdev;