diff options
author | Javier González <javier@cnexlabs.com> | 2017-10-13 08:46:26 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2017-10-13 10:34:57 -0400 |
commit | 21d2287119e843929c29fb1adbd271bde1fac7ae (patch) | |
tree | aeb579a033c1e6d5d91dc8db71b36a8e51399c6b /drivers/lightnvm/pblk-map.c | |
parent | 1e82123da6a4c6019ef03bcd47e4b3dc18dd136e (diff) |
lightnvm: pblk: enable 1 LUN configuration
Metadata I/Os are scheduled to minimize their impact on user data I/Os.
When there are enough LUNs instantiated (i.e., enough bandwidth), it is
easy to interleave metadata and data one after the other so that
metadata I/Os are the ones being blocked and not vice-versa.
We do this by calculating the distance between the I/Os in terms of the
LUNs that are not in use, and selecting a free LUN that satisfies the
simple heuristic that metadata is scheduled behind. The per-LUN
semaphores guarantee consistency. This works fine on >1 LUN
configuration. However, when a single LUN is instantiated, this design
leads to a deadlock, where metadata waits to be scheduled on a free LUN.
This patch implements the 1 LUN case by simply scheduling the metadata
I/O after the data I/O. In the process, we refactor the way a line is
replaced to ensure that metadata writes are submitted after data writes
in order to guarantee block sequentiality. Note that, since there is
only one LUN, both I/Os will block each other by design. However, such
configuration only pursues tight read latencies, not write bandwidth.
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/lightnvm/pblk-map.c')
-rw-r--r-- | drivers/lightnvm/pblk-map.c | 21 |
1 files changed, 12 insertions, 9 deletions
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c index fddb924f6dde..3bc4c94f9cf2 100644 --- a/drivers/lightnvm/pblk-map.c +++ b/drivers/lightnvm/pblk-map.c | |||
@@ -25,13 +25,23 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry, | |||
25 | unsigned int valid_secs) | 25 | unsigned int valid_secs) |
26 | { | 26 | { |
27 | struct pblk_line *line = pblk_line_get_data(pblk); | 27 | struct pblk_line *line = pblk_line_get_data(pblk); |
28 | struct pblk_emeta *emeta = line->emeta; | 28 | struct pblk_emeta *emeta; |
29 | struct pblk_w_ctx *w_ctx; | 29 | struct pblk_w_ctx *w_ctx; |
30 | __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf); | 30 | __le64 *lba_list; |
31 | u64 paddr; | 31 | u64 paddr; |
32 | int nr_secs = pblk->min_write_pgs; | 32 | int nr_secs = pblk->min_write_pgs; |
33 | int i; | 33 | int i; |
34 | 34 | ||
35 | if (pblk_line_is_full(line)) { | ||
36 | struct pblk_line *prev_line = line; | ||
37 | |||
38 | line = pblk_line_replace_data(pblk); | ||
39 | pblk_line_close_meta(pblk, prev_line); | ||
40 | } | ||
41 | |||
42 | emeta = line->emeta; | ||
43 | lba_list = emeta_to_lbas(pblk, emeta->buf); | ||
44 | |||
35 | paddr = pblk_alloc_page(pblk, line, nr_secs); | 45 | paddr = pblk_alloc_page(pblk, line, nr_secs); |
36 | 46 | ||
37 | for (i = 0; i < nr_secs; i++, paddr++) { | 47 | for (i = 0; i < nr_secs; i++, paddr++) { |
@@ -60,13 +70,6 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry, | |||
60 | } | 70 | } |
61 | } | 71 | } |
62 | 72 | ||
63 | if (pblk_line_is_full(line)) { | ||
64 | struct pblk_line *prev_line = line; | ||
65 | |||
66 | pblk_line_replace_data(pblk); | ||
67 | pblk_line_close_meta(pblk, prev_line); | ||
68 | } | ||
69 | |||
70 | pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap); | 73 | pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap); |
71 | } | 74 | } |
72 | 75 | ||