summary refs log tree commit diff stats
path: root/drivers/lightnvm/pblk-map.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/lightnvm/pblk-map.c')
-rw-r--r--  drivers/lightnvm/pblk-map.c  75
1 file changed, 53 insertions, 22 deletions
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 17c16955284d..fddb924f6dde 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -25,9 +25,9 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
25 unsigned int valid_secs) 25 unsigned int valid_secs)
26{ 26{
27 struct pblk_line *line = pblk_line_get_data(pblk); 27 struct pblk_line *line = pblk_line_get_data(pblk);
28 struct line_emeta *emeta = line->emeta; 28 struct pblk_emeta *emeta = line->emeta;
29 struct pblk_w_ctx *w_ctx; 29 struct pblk_w_ctx *w_ctx;
30 __le64 *lba_list = pblk_line_emeta_to_lbas(emeta); 30 __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
31 u64 paddr; 31 u64 paddr;
32 int nr_secs = pblk->min_write_pgs; 32 int nr_secs = pblk->min_write_pgs;
33 int i; 33 int i;
@@ -51,18 +51,20 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
51 w_ctx->ppa = ppa_list[i]; 51 w_ctx->ppa = ppa_list[i];
52 meta_list[i].lba = cpu_to_le64(w_ctx->lba); 52 meta_list[i].lba = cpu_to_le64(w_ctx->lba);
53 lba_list[paddr] = cpu_to_le64(w_ctx->lba); 53 lba_list[paddr] = cpu_to_le64(w_ctx->lba);
54 le64_add_cpu(&line->emeta->nr_valid_lbas, 1); 54 line->nr_valid_lbas++;
55 } else { 55 } else {
56 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY); 56 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
57 lba_list[paddr] = cpu_to_le64(ADDR_EMPTY); 57
58 pblk_map_pad_invalidate(pblk, line, paddr); 58 lba_list[paddr] = meta_list[i].lba = addr_empty;
59 __pblk_map_invalidate(pblk, line, paddr);
59 } 60 }
60 } 61 }
61 62
62 if (pblk_line_is_full(line)) { 63 if (pblk_line_is_full(line)) {
63 line = pblk_line_replace_data(pblk); 64 struct pblk_line *prev_line = line;
64 if (!line) 65
65 return; 66 pblk_line_replace_data(pblk);
67 pblk_line_close_meta(pblk, prev_line);
66 } 68 }
67 69
68 pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap); 70 pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
@@ -91,8 +93,9 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
91{ 93{
92 struct nvm_tgt_dev *dev = pblk->dev; 94 struct nvm_tgt_dev *dev = pblk->dev;
93 struct nvm_geo *geo = &dev->geo; 95 struct nvm_geo *geo = &dev->geo;
94 struct pblk_line *e_line = pblk_line_get_data_next(pblk); 96 struct pblk_line_meta *lm = &pblk->lm;
95 struct pblk_sec_meta *meta_list = rqd->meta_list; 97 struct pblk_sec_meta *meta_list = rqd->meta_list;
98 struct pblk_line *e_line, *d_line;
96 unsigned int map_secs; 99 unsigned int map_secs;
97 int min = pblk->min_write_pgs; 100 int min = pblk->min_write_pgs;
98 int i, erase_lun; 101 int i, erase_lun;
@@ -102,35 +105,63 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
102 pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i], 105 pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
103 lun_bitmap, &meta_list[i], map_secs); 106 lun_bitmap, &meta_list[i], map_secs);
104 107
105 erase_lun = rqd->ppa_list[i].g.lun * geo->nr_chnls + 108 erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
106 rqd->ppa_list[i].g.ch;
107 109
108 if (!test_bit(erase_lun, e_line->erase_bitmap)) { 110 /* line can change after page map. We might also be writing the
109 if (down_trylock(&pblk->erase_sem)) 111 * last line.
110 continue; 112 */
113 e_line = pblk_line_get_erase(pblk);
114 if (!e_line)
115 return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
116 valid_secs, i + min);
111 117
118 spin_lock(&e_line->lock);
119 if (!test_bit(erase_lun, e_line->erase_bitmap)) {
112 set_bit(erase_lun, e_line->erase_bitmap); 120 set_bit(erase_lun, e_line->erase_bitmap);
113 atomic_dec(&e_line->left_eblks); 121 atomic_dec(&e_line->left_eblks);
122
114 *erase_ppa = rqd->ppa_list[i]; 123 *erase_ppa = rqd->ppa_list[i];
115 erase_ppa->g.blk = e_line->id; 124 erase_ppa->g.blk = e_line->id;
116 125
126 spin_unlock(&e_line->lock);
127
117 /* Avoid evaluating e_line->left_eblks */ 128 /* Avoid evaluating e_line->left_eblks */
118 return pblk_map_rq(pblk, rqd, sentry, lun_bitmap, 129 return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
119 valid_secs, i + min); 130 valid_secs, i + min);
120 } 131 }
132 spin_unlock(&e_line->lock);
121 } 133 }
122 134
123 /* Erase blocks that are bad in this line but might not be in next */ 135 d_line = pblk_line_get_data(pblk);
124 if (unlikely(ppa_empty(*erase_ppa))) { 136
125 struct pblk_line_meta *lm = &pblk->lm; 137 /* line can change after page map. We might also be writing the
138 * last line.
139 */
140 e_line = pblk_line_get_erase(pblk);
141 if (!e_line)
142 return;
126 143
127 i = find_first_zero_bit(e_line->erase_bitmap, lm->blk_per_line); 144 /* Erase blocks that are bad in this line but might not be in next */
128 if (i == lm->blk_per_line) 145 if (unlikely(ppa_empty(*erase_ppa)) &&
146 bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
147 int bit = -1;
148
149retry:
150 bit = find_next_bit(d_line->blk_bitmap,
151 lm->blk_per_line, bit + 1);
152 if (bit >= lm->blk_per_line)
129 return; 153 return;
130 154
131 set_bit(i, e_line->erase_bitmap); 155 spin_lock(&e_line->lock);
156 if (test_bit(bit, e_line->erase_bitmap)) {
157 spin_unlock(&e_line->lock);
158 goto retry;
159 }
160 spin_unlock(&e_line->lock);
161
162 set_bit(bit, e_line->erase_bitmap);
132 atomic_dec(&e_line->left_eblks); 163 atomic_dec(&e_line->left_eblks);
133 *erase_ppa = pblk->luns[i].bppa; /* set ch and lun */ 164 *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
134 erase_ppa->g.blk = e_line->id; 165 erase_ppa->g.blk = e_line->id;
135 } 166 }
136} 167}