author     Javier González <javier@cnexlabs.com>   2018-03-29 18:05:10 -0400
committer  Jens Axboe <axboe@kernel.dk>            2018-03-29 19:29:09 -0400
commit     e46f4e4822bdecf9bcbc2e71b2a3ae7f37464a2d (patch)
tree       f69895ed8945edc7be7e80186ad7b4568d367db5 /drivers/lightnvm/pblk-init.c
parent     43d47127219de1dd674b917c1835baa14c4c1768 (diff)
lightnvm: simplify geometry structure
Currently, the device geometry is stored redundantly in the nvm_id and
nvm_geo structures at a device level. Moreover, when instantiating
targets on a specific number of LUNs, these structures are replicated
and manually modified to fit the instance channel and LUN partitioning.

Instead, create a generic geometry around nvm_geo, which can be used by
(i) the underlying device to describe the geometry of the whole device,
and (ii) instances to describe their geometry independently.
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
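
For orientation, the geometry fields that pblk-init.c reads from the shared struct nvm_geo after this patch are sketched below. This is an illustrative summary only: the field names are the ones visible in the diff (csecs, clba, ws_opt, mw_cunits, all_luns, nr_chnls, nr_luns, addrf, dom), the types are simplified, and the authoritative definition lives in include/linux/lightnvm.h.

        /* Illustrative sketch, not the real declaration. */
        struct nvm_geo_sketch {
                int nr_chnls;           /* channels exposed to this instance */
                int nr_luns;            /* LUNs per channel */
                int all_luns;           /* total LUNs across all channels */
                int csecs;              /* sector size in bytes (was sec_size) */
                int clba;               /* sectors per chunk (was sec_per_chk) */
                int ws_opt;             /* optimal write size in sectors */
                int mw_cunits;          /* write-cache minimum units */
                u32 dom;                /* device operating mode flags, e.g. NVM_RSP_L2P */
                struct nvm_addrf addrf; /* address format; cast to nvm_addrf_12 on 1.2 devices */
        };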
Diffstat (limited to 'drivers/lightnvm/pblk-init.c')
-rw-r--r--   drivers/lightnvm/pblk-init.c   117
1 file changed, 62 insertions(+), 55 deletions(-)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 8f1d622801df..2fca27d0a9b5 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -179,7 +179,7 @@ static int pblk_rwb_init(struct pblk *pblk)
                 return -ENOMEM;
 
         power_size = get_count_order(nr_entries);
-        power_seg_sz = get_count_order(geo->sec_size);
+        power_seg_sz = get_count_order(geo->csecs);
 
         return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
 }
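
The ring-buffer segment size now comes straight from csecs. For a hypothetical 4 KiB sector size (a value assumed here, not stated by the patch):

        power_seg_sz = get_count_order(4096);   /* = 12, i.e. 2^12-byte segments */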
@@ -187,18 +187,10 @@ static int pblk_rwb_init(struct pblk *pblk)
 /* Minimum pages needed within a lun */
 #define ADDR_POOL_SIZE 64
 
-static int pblk_set_ppaf(struct pblk *pblk)
+static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
 {
-        struct nvm_tgt_dev *dev = pblk->dev;
-        struct nvm_geo *geo = &dev->geo;
-        struct nvm_addr_format ppaf = geo->ppaf;
-        int mod, power_len;
-
-        div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
-        if (mod) {
-                pr_err("pblk: bad configuration of sectors/pages\n");
-                return -EINVAL;
-        }
+        struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
+        int power_len;
 
         /* Re-calculate channel and lun format to adapt to configuration */
         power_len = get_count_order(geo->nr_chnls);
@@ -206,34 +198,50 @@ static int pblk_set_ppaf(struct pblk *pblk)
                 pr_err("pblk: supports only power-of-two channel config.\n");
                 return -EINVAL;
         }
-        ppaf.ch_len = power_len;
+        dst->ch_len = power_len;
 
         power_len = get_count_order(geo->nr_luns);
         if (1 << power_len != geo->nr_luns) {
                 pr_err("pblk: supports only power-of-two LUN config.\n");
                 return -EINVAL;
         }
-        ppaf.lun_len = power_len;
+        dst->lun_len = power_len;
 
-        pblk->ppaf.sec_offset = 0;
-        pblk->ppaf.pln_offset = ppaf.sect_len;
-        pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
-        pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
-        pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
-        pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
-        pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
-        pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
-                                                pblk->ppaf.pln_offset;
-        pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
-                                                pblk->ppaf.ch_offset;
-        pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
-                                                pblk->ppaf.lun_offset;
-        pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
-                                                pblk->ppaf.pg_offset;
-        pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
-                                                pblk->ppaf.blk_offset;
-
-        pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;
+        dst->blk_len = src->blk_len;
+        dst->pg_len = src->pg_len;
+        dst->pln_len = src->pln_len;
+        dst->sect_len = src->sect_len;
+
+        dst->sect_offset = 0;
+        dst->pln_offset = dst->sect_len;
+        dst->ch_offset = dst->pln_offset + dst->pln_len;
+        dst->lun_offset = dst->ch_offset + dst->ch_len;
+        dst->pg_offset = dst->lun_offset + dst->lun_len;
+        dst->blk_offset = dst->pg_offset + dst->pg_len;
+
+        dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset;
+        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
+        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
+        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
+        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
+        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
+
+        return dst->blk_offset + src->blk_len;
+}
+
+static int pblk_set_ppaf(struct pblk *pblk)
+{
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        int mod;
+
+        div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
+        if (mod) {
+                pr_err("pblk: bad configuration of sectors/pages\n");
+                return -EINVAL;
+        }
+
+        pblk->ppaf_bitsize = pblk_set_addrf_12(geo, (void *)&pblk->ppaf);
 
         return 0;
 }
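
To see what pblk_set_addrf_12() computes, here is a worked example with assumed 1.2 field widths (8 channels, 4 LUNs, 2 planes, 4 sectors per page, 512 pages, 1024 blocks); none of these numbers come from the patch:

        /* Assumed: sect_len = 2, pln_len = 1, ch_len = 3,
         *          lun_len = 2, pg_len = 9, blk_len = 10. */
        sect_offset = 0;                     /*  0 */
        pln_offset  = sect_len;              /*  2 */
        ch_offset   = pln_offset + pln_len;  /*  3 */
        lun_offset  = ch_offset + ch_len;    /*  6 */
        pg_offset   = lun_offset + lun_len;  /*  8 */
        blk_offset  = pg_offset + pg_len;    /* 17 */

        ppaf_bitsize = blk_offset + blk_len; /* 27 bits of the 64-bit PPA in use */

Each mask then isolates its field, e.g. ch_mask = ((1ULL << 3) - 1) << 3 = 0x38.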
@@ -303,10 +311,9 @@ static int pblk_core_init(struct pblk *pblk)
         atomic64_set(&pblk->nr_flush, 0);
         pblk->nr_flush_rst = 0;
 
-        pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
-                                                geo->nr_planes * geo->all_luns;
+        pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;
 
-        pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
+        pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
         max_write_ppas = pblk->min_write_pgs * geo->all_luns;
         pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
         pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
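
A rough illustration of the new write sizing, with assumed device values (csecs = 4096, PAGE_SIZE = 4096, ws_opt = 8, mw_cunits = 24, all_luns = 128):

        min_write_pgs  = 8 * (4096 / 4096);        /* = 8 sectors per write unit */
        max_write_ppas = 8 * 128;                  /* = 1024 */
        max_write_pgs  = min(1024, NVM_MAX_VLBA);  /* capped by the per-command PPA limit */
        pgs_in_buffer  = 24 * 128;                 /* = 3072 sectors held in the write buffer */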
@@ -583,18 +590,18 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
         /* Round to sector size so that lba_list starts on its own sector */
         lm->emeta_sec[1] = DIV_ROUND_UP(
                         sizeof(struct line_emeta) + lm->blk_bitmap_len +
-                        sizeof(struct wa_counters), geo->sec_size);
-        lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;
+                        sizeof(struct wa_counters), geo->csecs);
+        lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;
 
         /* Round to sector size so that vsc_list starts on its own sector */
         lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
         lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
-                        geo->sec_size);
-        lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;
+                        geo->csecs);
+        lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;
 
         lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
-                        geo->sec_size);
-        lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;
+                        geo->csecs);
+        lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;
 
         lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
 
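
The emeta regions are simply rounded up to whole sectors of csecs bytes; for example (hypothetical sizes), a 1 KiB header with csecs = 4096 still occupies one full sector:

        emeta_sec[1] = DIV_ROUND_UP(1024, 4096);   /* = 1 sector */
        emeta_len[1] = 1 * 4096;                   /* = 4096 bytes */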
@@ -625,13 +632,13 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
          * on user capacity consider only provisioned blocks
          */
         pblk->rl.total_blocks = nr_free_blks;
-        pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+        pblk->rl.nr_secs = nr_free_blks * geo->clba;
 
         /* Consider sectors used for metadata */
         sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
-        blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+        blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
 
-        pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+        pblk->capacity = (provisioned - blk_meta) * geo->clba;
 
         atomic_set(&pblk->rl.free_blocks, nr_free_blks);
         atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
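
A back-of-the-envelope example of the capacity accounting, with assumed values (clba = 4096 sectors per chunk, 128 free lines, smeta_sec + emeta_sec[0] = 72 sectors per line):

        sec_meta = 72 * 128;                  /* = 9216 metadata sectors */
        blk_meta = DIV_ROUND_UP(9216, 4096);  /* = 3 chunks set aside    */
        capacity = (provisioned - 3) * 4096;  /* user-visible sectors    */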
@@ -783,7 +790,7 @@ static int pblk_line_meta_init(struct pblk *pblk)
         unsigned int smeta_len, emeta_len;
         int i;
 
-        lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+        lm->sec_per_line = geo->clba * geo->all_luns;
         lm->blk_per_line = geo->all_luns;
         lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
         lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
@@ -797,8 +804,8 @@ static int pblk_line_meta_init(struct pblk *pblk)
          */
         i = 1;
 add_smeta_page:
-        lm->smeta_sec = i * geo->sec_per_pl;
-        lm->smeta_len = lm->smeta_sec * geo->sec_size;
+        lm->smeta_sec = i * geo->ws_opt;
+        lm->smeta_len = lm->smeta_sec * geo->csecs;
 
         smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
         if (smeta_len > lm->smeta_len) {
@@ -811,8 +818,8 @@ add_smeta_page:
          */
         i = 1;
 add_emeta_page:
-        lm->emeta_sec[0] = i * geo->sec_per_pl;
-        lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;
+        lm->emeta_sec[0] = i * geo->ws_opt;
+        lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;
 
         emeta_len = calc_emeta_len(pblk);
         if (emeta_len > lm->emeta_len[0]) {
@@ -825,7 +832,7 @@ add_emeta_page:
         lm->min_blk_line = 1;
         if (geo->all_luns > 1)
                 lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
-                                        lm->emeta_sec[0], geo->sec_per_chk);
+                                        lm->emeta_sec[0], geo->clba);
 
         if (lm->min_blk_line > lm->blk_per_line) {
                 pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
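
Both metadata regions grow in ws_opt-sized steps until their structures fit, and a line must still keep at least one chunk for data on top of its metadata. With assumed values (ws_opt = 8, csecs = 4096, clba = 4096, and the emeta loop settling at 160 sectors):

        smeta_sec    = 1 * 8;                            /* first pass: 8 sectors (32 KiB) */
        min_blk_line = 1 + DIV_ROUND_UP(8 + 160, 4096);  /* = 2, must be <= blk_per_line   */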
@@ -1009,9 +1016,9 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
         struct pblk *pblk;
         int ret;
 
-        if (dev->identity.dom & NVM_RSP_L2P) {
+        if (dev->geo.dom & NVM_RSP_L2P) {
                 pr_err("pblk: host-side L2P table not supported. (%x)\n",
-                                        dev->identity.dom);
+                                        dev->geo.dom);
                 return ERR_PTR(-EINVAL);
         }
 
@@ -1093,7 +1100,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 
         blk_queue_write_cache(tqueue, true, false);
 
-        tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
+        tqueue->limits.discard_granularity = geo->clba * geo->csecs;
         tqueue->limits.discard_alignment = 0;
         blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
         blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
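
Since the chunk is the reset unit, discards are advertised at whole-chunk granularity; with the same assumed clba = 4096 sectors and csecs = 4096 bytes used above:

        discard_granularity = 4096 * 4096;    /* = 16 MiB per chunk */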