diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-24 13:26:30 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-24 13:26:30 -0500 |
commit | 4ce01c518ebf6e3818abf5f4a9e1a1ef9d52f232 (patch) | |
tree | 356fa4e0e8207afb36e127e7b1f4cf6dd54217d6 | |
parent | a2931547eee19201856a89406397ecc1f978deaf (diff) | |
parent | 12e57f59ca3344a588531f68eeede45666e8a6e0 (diff) |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
"A round of fixes/updates for the current series.
This looks a little bigger than it is, but that's mainly because we
pushed the lightnvm enabled null_blk change out of the merge window so
it could be updated a bit. The rest of the volume is also mostly
lightnvm. In particular:
- Lightnvm. Various fixes, additions, updates from Matias and
Javier, as well as from Wenwei Tao.
- NVMe:
- Fix for potential arithmetic overflow from Keith.
- Also from Keith, ensure that we reap pending completions from
a completion queue before deleting it. Fixes kernel crashes
when resetting a device with IO pending.
- Various little lightnvm related tweaks from Matias.
- Fixup flushes to go through the IO scheduler, for the cases where a
flush is not required. Fixes a case in CFQ where we would be
idling and not see this request, hence not break the idling. From
Jan Kara.
- Use list_{first,prev,next} in elevator.c for cleaner code. From
Geliang Tang.
- Fix for a warning trigger on btrfs and raid on single queue blk-mq
devices, where we would flush plug callbacks with preemption
disabled. From me.
- A mac partition validation fix from Kees Cook.
- Two merge fixes from Ming, marked stable. A third part is adding a
new warning so we'll notice this quicker in the future, if we screw
up the accounting.
- Cleanup of thread name/creation in mtip32xx from Rasmus Villemoes"
* 'for-linus' of git://git.kernel.dk/linux-block: (32 commits)
blk-merge: warn if figured out segment number is bigger than nr_phys_segments
blk-merge: fix blk_bio_segment_split
block: fix segment split
blk-mq: fix calling unplug callbacks with preempt disabled
mac: validate mac_partition is within sector
mtip32xx: use formatting capability of kthread_create_on_node
NVMe: reap completion entries when deleting queue
lightnvm: add free and bad lun info to show luns
lightnvm: keep track of block counts
nvme: lightnvm: use admin queues for admin cmds
lightnvm: missing free on init error
lightnvm: wrong return value and redundant free
null_blk: do not del gendisk with lightnvm
null_blk: use device addressing mode
null_blk: use ppa_cache pool
NVMe: Fix possible arithmetic overflow for max segments
blk-flush: Queue through IO scheduler when flush not required
null_blk: register as a LightNVM device
elevator: use list_{first,prev,next}_entry
lightnvm: cleanup queue before target removal
...
-rw-r--r-- | Documentation/block/null_blk.txt | 3 | ||||
-rw-r--r-- | MAINTAINERS | 1 | ||||
-rw-r--r-- | block/blk-flush.c | 2 | ||||
-rw-r--r-- | block/blk-merge.c | 32 | ||||
-rw-r--r-- | block/blk-mq.c | 14 | ||||
-rw-r--r-- | block/noop-iosched.c | 10 | ||||
-rw-r--r-- | block/partitions/mac.c | 10 | ||||
-rw-r--r-- | drivers/Makefile | 2 | ||||
-rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c | 6 | ||||
-rw-r--r-- | drivers/block/null_blk.c | 207 | ||||
-rw-r--r-- | drivers/lightnvm/core.c | 51 | ||||
-rw-r--r-- | drivers/lightnvm/gennvm.c | 73 | ||||
-rw-r--r-- | drivers/lightnvm/gennvm.h | 2 | ||||
-rw-r--r-- | drivers/lightnvm/rrpc.c | 32 | ||||
-rw-r--r-- | drivers/nvme/host/lightnvm.c | 139 | ||||
-rw-r--r-- | drivers/nvme/host/pci.c | 9 | ||||
-rw-r--r-- | include/linux/lightnvm.h | 176 |
17 files changed, 525 insertions, 244 deletions
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index 2f6c6ff7161d..d8880ca30af4 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt | |||
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0 | |||
70 | parameter. | 70 | parameter. |
71 | 1: The multi-queue block layer is instantiated with a hardware dispatch | 71 | 1: The multi-queue block layer is instantiated with a hardware dispatch |
72 | queue for each CPU node in the system. | 72 | queue for each CPU node in the system. |
73 | |||
74 | use_lightnvm=[0/1]: Default: 0 | ||
75 | Register device with LightNVM. Requires blk-mq to be used. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 050d0e77a2cf..3f92804f5dac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6366,6 +6366,7 @@ F: arch/*/include/asm/pmem.h | |||
6366 | LIGHTNVM PLATFORM SUPPORT | 6366 | LIGHTNVM PLATFORM SUPPORT |
6367 | M: Matias Bjorling <mb@lightnvm.io> | 6367 | M: Matias Bjorling <mb@lightnvm.io> |
6368 | W: http://github/OpenChannelSSD | 6368 | W: http://github/OpenChannelSSD |
6369 | L: linux-block@vger.kernel.org | ||
6369 | S: Maintained | 6370 | S: Maintained |
6370 | F: drivers/lightnvm/ | 6371 | F: drivers/lightnvm/ |
6371 | F: include/linux/lightnvm.h | 6372 | F: include/linux/lightnvm.h |
diff --git a/block/blk-flush.c b/block/blk-flush.c index 9c423e53324a..c81d56ec308f 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq) | |||
422 | if (q->mq_ops) { | 422 | if (q->mq_ops) { |
423 | blk_mq_insert_request(rq, false, false, true); | 423 | blk_mq_insert_request(rq, false, false, true); |
424 | } else | 424 | } else |
425 | list_add_tail(&rq->queuelist, &q->queue_head); | 425 | q->elevator->type->ops.elevator_add_req_fn(q, rq); |
426 | return; | 426 | return; |
427 | } | 427 | } |
428 | 428 | ||
diff --git a/block/blk-merge.c b/block/blk-merge.c index de5716d8e525..41a55ba0d78e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
76 | struct bio_vec bv, bvprv, *bvprvp = NULL; | 76 | struct bio_vec bv, bvprv, *bvprvp = NULL; |
77 | struct bvec_iter iter; | 77 | struct bvec_iter iter; |
78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; | 78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; |
79 | unsigned front_seg_size = bio->bi_seg_front_size; | ||
80 | bool do_split = true; | ||
81 | struct bio *new = NULL; | ||
79 | 82 | ||
80 | bio_for_each_segment(bv, bio, iter) { | 83 | bio_for_each_segment(bv, bio, iter) { |
81 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) | 84 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) |
@@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
98 | 101 | ||
99 | seg_size += bv.bv_len; | 102 | seg_size += bv.bv_len; |
100 | bvprv = bv; | 103 | bvprv = bv; |
101 | bvprvp = &bv; | 104 | bvprvp = &bvprv; |
102 | sectors += bv.bv_len >> 9; | 105 | sectors += bv.bv_len >> 9; |
103 | continue; | 106 | continue; |
104 | } | 107 | } |
@@ -108,16 +111,29 @@ new_segment: | |||
108 | 111 | ||
109 | nsegs++; | 112 | nsegs++; |
110 | bvprv = bv; | 113 | bvprv = bv; |
111 | bvprvp = &bv; | 114 | bvprvp = &bvprv; |
112 | seg_size = bv.bv_len; | 115 | seg_size = bv.bv_len; |
113 | sectors += bv.bv_len >> 9; | 116 | sectors += bv.bv_len >> 9; |
117 | |||
118 | if (nsegs == 1 && seg_size > front_seg_size) | ||
119 | front_seg_size = seg_size; | ||
114 | } | 120 | } |
115 | 121 | ||
116 | *segs = nsegs; | 122 | do_split = false; |
117 | return NULL; | ||
118 | split: | 123 | split: |
119 | *segs = nsegs; | 124 | *segs = nsegs; |
120 | return bio_split(bio, sectors, GFP_NOIO, bs); | 125 | |
126 | if (do_split) { | ||
127 | new = bio_split(bio, sectors, GFP_NOIO, bs); | ||
128 | if (new) | ||
129 | bio = new; | ||
130 | } | ||
131 | |||
132 | bio->bi_seg_front_size = front_seg_size; | ||
133 | if (seg_size > bio->bi_seg_back_size) | ||
134 | bio->bi_seg_back_size = seg_size; | ||
135 | |||
136 | return do_split ? new : NULL; | ||
121 | } | 137 | } |
122 | 138 | ||
123 | void blk_queue_split(struct request_queue *q, struct bio **bio, | 139 | void blk_queue_split(struct request_queue *q, struct bio **bio, |
@@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
412 | if (sg) | 428 | if (sg) |
413 | sg_mark_end(sg); | 429 | sg_mark_end(sg); |
414 | 430 | ||
431 | /* | ||
432 | * Something must have been wrong if the figured number of | ||
433 | * segment is bigger than number of req's physical segments | ||
434 | */ | ||
435 | WARN_ON(nsegs > rq->nr_phys_segments); | ||
436 | |||
415 | return nsegs; | 437 | return nsegs; |
416 | } | 438 | } |
417 | EXPORT_SYMBOL(blk_rq_map_sg); | 439 | EXPORT_SYMBOL(blk_rq_map_sg); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ae09de62f19..6d6f8feb48c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
1291 | blk_mq_bio_to_request(rq, bio); | 1291 | blk_mq_bio_to_request(rq, bio); |
1292 | 1292 | ||
1293 | /* | 1293 | /* |
1294 | * we do limited pluging. If bio can be merged, do merge. | 1294 | * We do limited pluging. If the bio can be merged, do that. |
1295 | * Otherwise the existing request in the plug list will be | 1295 | * Otherwise the existing request in the plug list will be |
1296 | * issued. So the plug list will have one request at most | 1296 | * issued. So the plug list will have one request at most |
1297 | */ | 1297 | */ |
1298 | if (plug) { | 1298 | if (plug) { |
1299 | /* | 1299 | /* |
1300 | * The plug list might get flushed before this. If that | 1300 | * The plug list might get flushed before this. If that |
1301 | * happens, same_queue_rq is invalid and plug list is empty | 1301 | * happens, same_queue_rq is invalid and plug list is |
1302 | **/ | 1302 | * empty |
1303 | */ | ||
1303 | if (same_queue_rq && !list_empty(&plug->mq_list)) { | 1304 | if (same_queue_rq && !list_empty(&plug->mq_list)) { |
1304 | old_rq = same_queue_rq; | 1305 | old_rq = same_queue_rq; |
1305 | list_del_init(&old_rq->queuelist); | 1306 | list_del_init(&old_rq->queuelist); |
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | |||
1380 | blk_mq_bio_to_request(rq, bio); | 1381 | blk_mq_bio_to_request(rq, bio); |
1381 | if (!request_count) | 1382 | if (!request_count) |
1382 | trace_block_plug(q); | 1383 | trace_block_plug(q); |
1383 | else if (request_count >= BLK_MAX_REQUEST_COUNT) { | 1384 | |
1385 | blk_mq_put_ctx(data.ctx); | ||
1386 | |||
1387 | if (request_count >= BLK_MAX_REQUEST_COUNT) { | ||
1384 | blk_flush_plug_list(plug, false); | 1388 | blk_flush_plug_list(plug, false); |
1385 | trace_block_plug(q); | 1389 | trace_block_plug(q); |
1386 | } | 1390 | } |
1391 | |||
1387 | list_add_tail(&rq->queuelist, &plug->mq_list); | 1392 | list_add_tail(&rq->queuelist, &plug->mq_list); |
1388 | blk_mq_put_ctx(data.ctx); | ||
1389 | return cookie; | 1393 | return cookie; |
1390 | } | 1394 | } |
1391 | 1395 | ||
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 3de89d4690f3..a163c487cf38 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq, | |||
21 | static int noop_dispatch(struct request_queue *q, int force) | 21 | static int noop_dispatch(struct request_queue *q, int force) |
22 | { | 22 | { |
23 | struct noop_data *nd = q->elevator->elevator_data; | 23 | struct noop_data *nd = q->elevator->elevator_data; |
24 | struct request *rq; | ||
24 | 25 | ||
25 | if (!list_empty(&nd->queue)) { | 26 | rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); |
26 | struct request *rq; | 27 | if (rq) { |
27 | rq = list_entry(nd->queue.next, struct request, queuelist); | ||
28 | list_del_init(&rq->queuelist); | 28 | list_del_init(&rq->queuelist); |
29 | elv_dispatch_sort(q, rq); | 29 | elv_dispatch_sort(q, rq); |
30 | return 1; | 30 | return 1; |
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq) | |||
46 | 46 | ||
47 | if (rq->queuelist.prev == &nd->queue) | 47 | if (rq->queuelist.prev == &nd->queue) |
48 | return NULL; | 48 | return NULL; |
49 | return list_entry(rq->queuelist.prev, struct request, queuelist); | 49 | return list_prev_entry(rq, queuelist); |
50 | } | 50 | } |
51 | 51 | ||
52 | static struct request * | 52 | static struct request * |
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq) | |||
56 | 56 | ||
57 | if (rq->queuelist.next == &nd->queue) | 57 | if (rq->queuelist.next == &nd->queue) |
58 | return NULL; | 58 | return NULL; |
59 | return list_entry(rq->queuelist.next, struct request, queuelist); | 59 | return list_next_entry(rq, queuelist); |
60 | } | 60 | } |
61 | 61 | ||
62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) | 62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) |
diff --git a/block/partitions/mac.c b/block/partitions/mac.c index c2c48ec64b27..621317ac4d59 100644 --- a/block/partitions/mac.c +++ b/block/partitions/mac.c | |||
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) | |||
32 | Sector sect; | 32 | Sector sect; |
33 | unsigned char *data; | 33 | unsigned char *data; |
34 | int slot, blocks_in_map; | 34 | int slot, blocks_in_map; |
35 | unsigned secsize; | 35 | unsigned secsize, datasize, partoffset; |
36 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
37 | int found_root = 0; | 37 | int found_root = 0; |
38 | int found_root_goodness = 0; | 38 | int found_root_goodness = 0; |
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) | |||
50 | } | 50 | } |
51 | secsize = be16_to_cpu(md->block_size); | 51 | secsize = be16_to_cpu(md->block_size); |
52 | put_dev_sector(sect); | 52 | put_dev_sector(sect); |
53 | data = read_part_sector(state, secsize/512, &sect); | 53 | datasize = round_down(secsize, 512); |
54 | data = read_part_sector(state, datasize / 512, &sect); | ||
54 | if (!data) | 55 | if (!data) |
55 | return -1; | 56 | return -1; |
56 | part = (struct mac_partition *) (data + secsize%512); | 57 | partoffset = secsize % 512; |
58 | if (partoffset + sizeof(*part) > datasize) | ||
59 | return -1; | ||
60 | part = (struct mac_partition *) (data + partoffset); | ||
57 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { | 61 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { |
58 | put_dev_sector(sect); | 62 | put_dev_sector(sect); |
59 | return 0; /* not a MacOS disk */ | 63 | return 0; /* not a MacOS disk */ |
diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ | |||
63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ | 63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ |
64 | 64 | ||
65 | obj-$(CONFIG_PARPORT) += parport/ | 65 | obj-$(CONFIG_PARPORT) += parport/ |
66 | obj-$(CONFIG_NVM) += lightnvm/ | ||
66 | obj-y += base/ block/ misc/ mfd/ nfc/ | 67 | obj-y += base/ block/ misc/ mfd/ nfc/ |
67 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ | 68 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ |
68 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ | 69 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ |
@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ | |||
70 | obj-y += macintosh/ | 71 | obj-y += macintosh/ |
71 | obj-$(CONFIG_IDE) += ide/ | 72 | obj-$(CONFIG_IDE) += ide/ |
72 | obj-$(CONFIG_SCSI) += scsi/ | 73 | obj-$(CONFIG_SCSI) += scsi/ |
73 | obj-$(CONFIG_NVM) += lightnvm/ | ||
74 | obj-y += nvme/ | 74 | obj-y += nvme/ |
75 | obj-$(CONFIG_ATA) += ata/ | 75 | obj-$(CONFIG_ATA) += ata/ |
76 | obj-$(CONFIG_TARGET_CORE) += target/ | 76 | obj-$(CONFIG_TARGET_CORE) += target/ |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) | |||
3810 | sector_t capacity; | 3810 | sector_t capacity; |
3811 | unsigned int index = 0; | 3811 | unsigned int index = 0; |
3812 | struct kobject *kobj; | 3812 | struct kobject *kobj; |
3813 | unsigned char thd_name[16]; | ||
3814 | 3813 | ||
3815 | if (dd->disk) | 3814 | if (dd->disk) |
3816 | goto skip_create_disk; /* hw init done, before rebuild */ | 3815 | goto skip_create_disk; /* hw init done, before rebuild */ |
@@ -3958,10 +3957,9 @@ skip_create_disk: | |||
3958 | } | 3957 | } |
3959 | 3958 | ||
3960 | start_service_thread: | 3959 | start_service_thread: |
3961 | sprintf(thd_name, "mtip_svc_thd_%02d", index); | ||
3962 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, | 3960 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, |
3963 | dd, dd->numa_node, "%s", | 3961 | dd, dd->numa_node, |
3964 | thd_name); | 3962 | "mtip_svc_thd_%02d", index); |
3965 | 3963 | ||
3966 | if (IS_ERR(dd->mtip_svc_handler)) { | 3964 | if (IS_ERR(dd->mtip_svc_handler)) { |
3967 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); | 3965 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..5c8ba5484d86 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/blk-mq.h> | 9 | #include <linux/blk-mq.h> |
10 | #include <linux/hrtimer.h> | 10 | #include <linux/hrtimer.h> |
11 | #include <linux/lightnvm.h> | ||
11 | 12 | ||
12 | struct nullb_cmd { | 13 | struct nullb_cmd { |
13 | struct list_head list; | 14 | struct list_head list; |
@@ -39,12 +40,14 @@ struct nullb { | |||
39 | 40 | ||
40 | struct nullb_queue *queues; | 41 | struct nullb_queue *queues; |
41 | unsigned int nr_queues; | 42 | unsigned int nr_queues; |
43 | char disk_name[DISK_NAME_LEN]; | ||
42 | }; | 44 | }; |
43 | 45 | ||
44 | static LIST_HEAD(nullb_list); | 46 | static LIST_HEAD(nullb_list); |
45 | static struct mutex lock; | 47 | static struct mutex lock; |
46 | static int null_major; | 48 | static int null_major; |
47 | static int nullb_indexes; | 49 | static int nullb_indexes; |
50 | static struct kmem_cache *ppa_cache; | ||
48 | 51 | ||
49 | struct completion_queue { | 52 | struct completion_queue { |
50 | struct llist_head list; | 53 | struct llist_head list; |
@@ -119,6 +122,10 @@ static int nr_devices = 2; | |||
119 | module_param(nr_devices, int, S_IRUGO); | 122 | module_param(nr_devices, int, S_IRUGO); |
120 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); | 123 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); |
121 | 124 | ||
125 | static bool use_lightnvm; | ||
126 | module_param(use_lightnvm, bool, S_IRUGO); | ||
127 | MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); | ||
128 | |||
122 | static int irqmode = NULL_IRQ_SOFTIRQ; | 129 | static int irqmode = NULL_IRQ_SOFTIRQ; |
123 | 130 | ||
124 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) | 131 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) |
@@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb) | |||
427 | { | 434 | { |
428 | list_del_init(&nullb->list); | 435 | list_del_init(&nullb->list); |
429 | 436 | ||
430 | del_gendisk(nullb->disk); | 437 | if (use_lightnvm) |
438 | nvm_unregister(nullb->disk_name); | ||
439 | else | ||
440 | del_gendisk(nullb->disk); | ||
431 | blk_cleanup_queue(nullb->q); | 441 | blk_cleanup_queue(nullb->q); |
432 | if (queue_mode == NULL_Q_MQ) | 442 | if (queue_mode == NULL_Q_MQ) |
433 | blk_mq_free_tag_set(&nullb->tag_set); | 443 | blk_mq_free_tag_set(&nullb->tag_set); |
434 | put_disk(nullb->disk); | 444 | if (!use_lightnvm) |
445 | put_disk(nullb->disk); | ||
435 | cleanup_queues(nullb); | 446 | cleanup_queues(nullb); |
436 | kfree(nullb); | 447 | kfree(nullb); |
437 | } | 448 | } |
438 | 449 | ||
450 | #ifdef CONFIG_NVM | ||
451 | |||
452 | static void null_lnvm_end_io(struct request *rq, int error) | ||
453 | { | ||
454 | struct nvm_rq *rqd = rq->end_io_data; | ||
455 | struct nvm_dev *dev = rqd->dev; | ||
456 | |||
457 | dev->mt->end_io(rqd, error); | ||
458 | |||
459 | blk_put_request(rq); | ||
460 | } | ||
461 | |||
462 | static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) | ||
463 | { | ||
464 | struct request *rq; | ||
465 | struct bio *bio = rqd->bio; | ||
466 | |||
467 | rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); | ||
468 | if (IS_ERR(rq)) | ||
469 | return -ENOMEM; | ||
470 | |||
471 | rq->cmd_type = REQ_TYPE_DRV_PRIV; | ||
472 | rq->__sector = bio->bi_iter.bi_sector; | ||
473 | rq->ioprio = bio_prio(bio); | ||
474 | |||
475 | if (bio_has_data(bio)) | ||
476 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
477 | |||
478 | rq->__data_len = bio->bi_iter.bi_size; | ||
479 | rq->bio = rq->biotail = bio; | ||
480 | |||
481 | rq->end_io_data = rqd; | ||
482 | |||
483 | blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); | ||
484 | |||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) | ||
489 | { | ||
490 | sector_t size = gb * 1024 * 1024 * 1024ULL; | ||
491 | sector_t blksize; | ||
492 | struct nvm_id_group *grp; | ||
493 | |||
494 | id->ver_id = 0x1; | ||
495 | id->vmnt = 0; | ||
496 | id->cgrps = 1; | ||
497 | id->cap = 0x3; | ||
498 | id->dom = 0x1; | ||
499 | |||
500 | id->ppaf.blk_offset = 0; | ||
501 | id->ppaf.blk_len = 16; | ||
502 | id->ppaf.pg_offset = 16; | ||
503 | id->ppaf.pg_len = 16; | ||
504 | id->ppaf.sect_offset = 32; | ||
505 | id->ppaf.sect_len = 8; | ||
506 | id->ppaf.pln_offset = 40; | ||
507 | id->ppaf.pln_len = 8; | ||
508 | id->ppaf.lun_offset = 48; | ||
509 | id->ppaf.lun_len = 8; | ||
510 | id->ppaf.ch_offset = 56; | ||
511 | id->ppaf.ch_len = 8; | ||
512 | |||
513 | do_div(size, bs); /* convert size to pages */ | ||
514 | do_div(size, 256); /* concert size to pgs pr blk */ | ||
515 | grp = &id->groups[0]; | ||
516 | grp->mtype = 0; | ||
517 | grp->fmtype = 0; | ||
518 | grp->num_ch = 1; | ||
519 | grp->num_pg = 256; | ||
520 | blksize = size; | ||
521 | do_div(size, (1 << 16)); | ||
522 | grp->num_lun = size + 1; | ||
523 | do_div(blksize, grp->num_lun); | ||
524 | grp->num_blk = blksize; | ||
525 | grp->num_pln = 1; | ||
526 | |||
527 | grp->fpg_sz = bs; | ||
528 | grp->csecs = bs; | ||
529 | grp->trdt = 25000; | ||
530 | grp->trdm = 25000; | ||
531 | grp->tprt = 500000; | ||
532 | grp->tprm = 500000; | ||
533 | grp->tbet = 1500000; | ||
534 | grp->tbem = 1500000; | ||
535 | grp->mpos = 0x010101; /* single plane rwe */ | ||
536 | grp->cpar = hw_queue_depth; | ||
537 | |||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) | ||
542 | { | ||
543 | mempool_t *virtmem_pool; | ||
544 | |||
545 | virtmem_pool = mempool_create_slab_pool(64, ppa_cache); | ||
546 | if (!virtmem_pool) { | ||
547 | pr_err("null_blk: Unable to create virtual memory pool\n"); | ||
548 | return NULL; | ||
549 | } | ||
550 | |||
551 | return virtmem_pool; | ||
552 | } | ||
553 | |||
554 | static void null_lnvm_destroy_dma_pool(void *pool) | ||
555 | { | ||
556 | mempool_destroy(pool); | ||
557 | } | ||
558 | |||
559 | static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, | ||
560 | gfp_t mem_flags, dma_addr_t *dma_handler) | ||
561 | { | ||
562 | return mempool_alloc(pool, mem_flags); | ||
563 | } | ||
564 | |||
565 | static void null_lnvm_dev_dma_free(void *pool, void *entry, | ||
566 | dma_addr_t dma_handler) | ||
567 | { | ||
568 | mempool_free(entry, pool); | ||
569 | } | ||
570 | |||
571 | static struct nvm_dev_ops null_lnvm_dev_ops = { | ||
572 | .identity = null_lnvm_id, | ||
573 | .submit_io = null_lnvm_submit_io, | ||
574 | |||
575 | .create_dma_pool = null_lnvm_create_dma_pool, | ||
576 | .destroy_dma_pool = null_lnvm_destroy_dma_pool, | ||
577 | .dev_dma_alloc = null_lnvm_dev_dma_alloc, | ||
578 | .dev_dma_free = null_lnvm_dev_dma_free, | ||
579 | |||
580 | /* Simulate nvme protocol restriction */ | ||
581 | .max_phys_sect = 64, | ||
582 | }; | ||
583 | #else | ||
584 | static struct nvm_dev_ops null_lnvm_dev_ops; | ||
585 | #endif /* CONFIG_NVM */ | ||
586 | |||
439 | static int null_open(struct block_device *bdev, fmode_t mode) | 587 | static int null_open(struct block_device *bdev, fmode_t mode) |
440 | { | 588 | { |
441 | return 0; | 589 | return 0; |
@@ -575,11 +723,6 @@ static int null_add_dev(void) | |||
575 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 723 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
576 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); | 724 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); |
577 | 725 | ||
578 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
579 | if (!disk) { | ||
580 | rv = -ENOMEM; | ||
581 | goto out_cleanup_blk_queue; | ||
582 | } | ||
583 | 726 | ||
584 | mutex_lock(&lock); | 727 | mutex_lock(&lock); |
585 | list_add_tail(&nullb->list, &nullb_list); | 728 | list_add_tail(&nullb->list, &nullb_list); |
@@ -589,6 +732,21 @@ static int null_add_dev(void) | |||
589 | blk_queue_logical_block_size(nullb->q, bs); | 732 | blk_queue_logical_block_size(nullb->q, bs); |
590 | blk_queue_physical_block_size(nullb->q, bs); | 733 | blk_queue_physical_block_size(nullb->q, bs); |
591 | 734 | ||
735 | sprintf(nullb->disk_name, "nullb%d", nullb->index); | ||
736 | |||
737 | if (use_lightnvm) { | ||
738 | rv = nvm_register(nullb->q, nullb->disk_name, | ||
739 | &null_lnvm_dev_ops); | ||
740 | if (rv) | ||
741 | goto out_cleanup_blk_queue; | ||
742 | goto done; | ||
743 | } | ||
744 | |||
745 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
746 | if (!disk) { | ||
747 | rv = -ENOMEM; | ||
748 | goto out_cleanup_lightnvm; | ||
749 | } | ||
592 | size = gb * 1024 * 1024 * 1024ULL; | 750 | size = gb * 1024 * 1024 * 1024ULL; |
593 | set_capacity(disk, size >> 9); | 751 | set_capacity(disk, size >> 9); |
594 | 752 | ||
@@ -598,10 +756,15 @@ static int null_add_dev(void) | |||
598 | disk->fops = &null_fops; | 756 | disk->fops = &null_fops; |
599 | disk->private_data = nullb; | 757 | disk->private_data = nullb; |
600 | disk->queue = nullb->q; | 758 | disk->queue = nullb->q; |
601 | sprintf(disk->disk_name, "nullb%d", nullb->index); | 759 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); |
760 | |||
602 | add_disk(disk); | 761 | add_disk(disk); |
762 | done: | ||
603 | return 0; | 763 | return 0; |
604 | 764 | ||
765 | out_cleanup_lightnvm: | ||
766 | if (use_lightnvm) | ||
767 | nvm_unregister(nullb->disk_name); | ||
605 | out_cleanup_blk_queue: | 768 | out_cleanup_blk_queue: |
606 | blk_cleanup_queue(nullb->q); | 769 | blk_cleanup_queue(nullb->q); |
607 | out_cleanup_tags: | 770 | out_cleanup_tags: |
@@ -625,6 +788,18 @@ static int __init null_init(void) | |||
625 | bs = PAGE_SIZE; | 788 | bs = PAGE_SIZE; |
626 | } | 789 | } |
627 | 790 | ||
791 | if (use_lightnvm && bs != 4096) { | ||
792 | pr_warn("null_blk: LightNVM only supports 4k block size\n"); | ||
793 | pr_warn("null_blk: defaults block size to 4k\n"); | ||
794 | bs = 4096; | ||
795 | } | ||
796 | |||
797 | if (use_lightnvm && queue_mode != NULL_Q_MQ) { | ||
798 | pr_warn("null_blk: LightNVM only supported for blk-mq\n"); | ||
799 | pr_warn("null_blk: defaults queue mode to blk-mq\n"); | ||
800 | queue_mode = NULL_Q_MQ; | ||
801 | } | ||
802 | |||
628 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { | 803 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
629 | if (submit_queues < nr_online_nodes) { | 804 | if (submit_queues < nr_online_nodes) { |
630 | pr_warn("null_blk: submit_queues param is set to %u.", | 805 | pr_warn("null_blk: submit_queues param is set to %u.", |
@@ -655,15 +830,27 @@ static int __init null_init(void) | |||
655 | if (null_major < 0) | 830 | if (null_major < 0) |
656 | return null_major; | 831 | return null_major; |
657 | 832 | ||
833 | if (use_lightnvm) { | ||
834 | ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), | ||
835 | 0, 0, NULL); | ||
836 | if (!ppa_cache) { | ||
837 | pr_err("null_blk: unable to create ppa cache\n"); | ||
838 | return -ENOMEM; | ||
839 | } | ||
840 | } | ||
841 | |||
658 | for (i = 0; i < nr_devices; i++) { | 842 | for (i = 0; i < nr_devices; i++) { |
659 | if (null_add_dev()) { | 843 | if (null_add_dev()) { |
660 | unregister_blkdev(null_major, "nullb"); | 844 | unregister_blkdev(null_major, "nullb"); |
661 | return -EINVAL; | 845 | goto err_ppa; |
662 | } | 846 | } |
663 | } | 847 | } |
664 | 848 | ||
665 | pr_info("null: module loaded\n"); | 849 | pr_info("null: module loaded\n"); |
666 | return 0; | 850 | return 0; |
851 | err_ppa: | ||
852 | kmem_cache_destroy(ppa_cache); | ||
853 | return -EINVAL; | ||
667 | } | 854 | } |
668 | 855 | ||
669 | static void __exit null_exit(void) | 856 | static void __exit null_exit(void) |
@@ -678,6 +865,8 @@ static void __exit null_exit(void) | |||
678 | null_del_dev(nullb); | 865 | null_del_dev(nullb); |
679 | } | 866 | } |
680 | mutex_unlock(&lock); | 867 | mutex_unlock(&lock); |
868 | |||
869 | kmem_cache_destroy(ppa_cache); | ||
681 | } | 870 | } |
682 | 871 | ||
683 | module_init(null_init); | 872 | module_init(null_init); |
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..5178645ac42b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
160 | } | 160 | } |
161 | EXPORT_SYMBOL(nvm_erase_blk); | 161 | EXPORT_SYMBOL(nvm_erase_blk); |
162 | 162 | ||
163 | static void nvm_core_free(struct nvm_dev *dev) | ||
164 | { | ||
165 | kfree(dev); | ||
166 | } | ||
167 | |||
168 | static int nvm_core_init(struct nvm_dev *dev) | 163 | static int nvm_core_init(struct nvm_dev *dev) |
169 | { | 164 | { |
170 | struct nvm_id *id = &dev->identity; | 165 | struct nvm_id *id = &dev->identity; |
@@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev) | |||
179 | dev->sec_size = grp->csecs; | 174 | dev->sec_size = grp->csecs; |
180 | dev->oob_size = grp->sos; | 175 | dev->oob_size = grp->sos; |
181 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; | 176 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; |
182 | dev->addr_mode = id->ppat; | 177 | memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); |
183 | dev->addr_format = id->ppaf; | ||
184 | 178 | ||
185 | dev->plane_mode = NVM_PLANE_SINGLE; | 179 | dev->plane_mode = NVM_PLANE_SINGLE; |
186 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; | 180 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; |
187 | 181 | ||
182 | if (grp->mtype != 0) { | ||
183 | pr_err("nvm: memory type not supported\n"); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | if (grp->fmtype != 0 && grp->fmtype != 1) { | ||
188 | pr_err("nvm: flash type not supported\n"); | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | |||
188 | if (grp->mpos & 0x020202) | 192 | if (grp->mpos & 0x020202) |
189 | dev->plane_mode = NVM_PLANE_DOUBLE; | 193 | dev->plane_mode = NVM_PLANE_DOUBLE; |
190 | if (grp->mpos & 0x040404) | 194 | if (grp->mpos & 0x040404) |
@@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev) | |||
213 | 217 | ||
214 | if (dev->mt) | 218 | if (dev->mt) |
215 | dev->mt->unregister_mgr(dev); | 219 | dev->mt->unregister_mgr(dev); |
216 | |||
217 | nvm_core_free(dev); | ||
218 | } | 220 | } |
219 | 221 | ||
220 | static int nvm_init(struct nvm_dev *dev) | 222 | static int nvm_init(struct nvm_dev *dev) |
221 | { | 223 | { |
222 | struct nvmm_type *mt; | 224 | struct nvmm_type *mt; |
223 | int ret = 0; | 225 | int ret = -EINVAL; |
224 | 226 | ||
225 | if (!dev->q || !dev->ops) | 227 | if (!dev->q || !dev->ops) |
226 | return -EINVAL; | 228 | return ret; |
227 | 229 | ||
228 | if (dev->ops->identity(dev->q, &dev->identity)) { | 230 | if (dev->ops->identity(dev->q, &dev->identity)) { |
229 | pr_err("nvm: device could not be identified\n"); | 231 | pr_err("nvm: device could not be identified\n"); |
230 | ret = -EINVAL; | ||
231 | goto err; | 232 | goto err; |
232 | } | 233 | } |
233 | 234 | ||
@@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev) | |||
273 | dev->nr_chnls); | 274 | dev->nr_chnls); |
274 | return 0; | 275 | return 0; |
275 | err: | 276 | err: |
276 | nvm_free(dev); | ||
277 | pr_err("nvm: failed to initialize nvm\n"); | 277 | pr_err("nvm: failed to initialize nvm\n"); |
278 | return ret; | 278 | return ret; |
279 | } | 279 | } |
@@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
308 | if (ret) | 308 | if (ret) |
309 | goto err_init; | 309 | goto err_init; |
310 | 310 | ||
311 | down_write(&nvm_lock); | ||
312 | list_add(&dev->devices, &nvm_devices); | ||
313 | up_write(&nvm_lock); | ||
314 | |||
315 | if (dev->ops->max_phys_sect > 1) { | 311 | if (dev->ops->max_phys_sect > 1) { |
316 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, | 312 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, |
317 | "ppalist"); | 313 | "ppalist"); |
318 | if (!dev->ppalist_pool) { | 314 | if (!dev->ppalist_pool) { |
319 | pr_err("nvm: could not create ppa pool\n"); | 315 | pr_err("nvm: could not create ppa pool\n"); |
320 | return -ENOMEM; | 316 | ret = -ENOMEM; |
317 | goto err_init; | ||
321 | } | 318 | } |
322 | } else if (dev->ops->max_phys_sect > 256) { | 319 | } else if (dev->ops->max_phys_sect > 256) { |
323 | pr_info("nvm: max sectors supported is 256.\n"); | 320 | pr_info("nvm: max sectors supported is 256.\n"); |
324 | return -EINVAL; | 321 | ret = -EINVAL; |
322 | goto err_init; | ||
325 | } | 323 | } |
326 | 324 | ||
325 | down_write(&nvm_lock); | ||
326 | list_add(&dev->devices, &nvm_devices); | ||
327 | up_write(&nvm_lock); | ||
328 | |||
327 | return 0; | 329 | return 0; |
328 | err_init: | 330 | err_init: |
329 | kfree(dev); | 331 | kfree(dev); |
@@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name) | |||
341 | return; | 343 | return; |
342 | } | 344 | } |
343 | 345 | ||
344 | nvm_exit(dev); | ||
345 | |||
346 | down_write(&nvm_lock); | 346 | down_write(&nvm_lock); |
347 | list_del(&dev->devices); | 347 | list_del(&dev->devices); |
348 | up_write(&nvm_lock); | 348 | up_write(&nvm_lock); |
349 | |||
350 | nvm_exit(dev); | ||
351 | kfree(dev); | ||
349 | } | 352 | } |
350 | EXPORT_SYMBOL(nvm_unregister); | 353 | EXPORT_SYMBOL(nvm_unregister); |
351 | 354 | ||
@@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t) | |||
457 | lockdep_assert_held(&nvm_lock); | 460 | lockdep_assert_held(&nvm_lock); |
458 | 461 | ||
459 | del_gendisk(tdisk); | 462 | del_gendisk(tdisk); |
463 | blk_cleanup_queue(q); | ||
464 | |||
460 | if (tt->exit) | 465 | if (tt->exit) |
461 | tt->exit(tdisk->private_data); | 466 | tt->exit(tdisk->private_data); |
462 | 467 | ||
463 | blk_cleanup_queue(q); | ||
464 | |||
465 | put_disk(tdisk); | 468 | put_disk(tdisk); |
466 | 469 | ||
467 | list_del(&t->list); | 470 | list_del(&t->list); |
@@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val) | |||
541 | if (!dev->mt) | 544 | if (!dev->mt) |
542 | return 0; | 545 | return 0; |
543 | 546 | ||
544 | dev->mt->free_blocks_print(dev); | 547 | dev->mt->lun_info_print(dev); |
545 | 548 | ||
546 | return 0; | 549 | return 0; |
547 | } | 550 | } |
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..e20e74ec6b91 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c | |||
@@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
60 | lun->vlun.lun_id = i % dev->luns_per_chnl; | 60 | lun->vlun.lun_id = i % dev->luns_per_chnl; |
61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; | 61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; |
62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; | 62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; |
63 | lun->vlun.nr_inuse_blocks = 0; | ||
64 | lun->vlun.nr_bad_blocks = 0; | ||
63 | } | 65 | } |
64 | return 0; | 66 | return 0; |
65 | } | 67 | } |
66 | 68 | ||
67 | static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | 69 | static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, |
68 | void *private) | 70 | void *private) |
69 | { | 71 | { |
70 | struct gen_nvm *gn = private; | 72 | struct gen_nvm *gn = private; |
71 | struct gen_lun *lun = &gn->luns[lun_id]; | 73 | struct nvm_dev *dev = gn->dev; |
74 | struct gen_lun *lun; | ||
72 | struct nvm_block *blk; | 75 | struct nvm_block *blk; |
73 | int i; | 76 | int i; |
74 | 77 | ||
75 | if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) | 78 | ppa = dev_to_generic_addr(gn->dev, ppa); |
76 | return 0; | 79 | lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; |
80 | |||
81 | for (i = 0; i < nr_blocks; i++) { | ||
82 | if (blks[i] == 0) | ||
83 | continue; | ||
77 | 84 | ||
78 | i = -1; | ||
79 | while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { | ||
80 | blk = &lun->vlun.blocks[i]; | 85 | blk = &lun->vlun.blocks[i]; |
81 | if (!blk) { | 86 | if (!blk) { |
82 | pr_err("gennvm: BB data is out of bounds.\n"); | 87 | pr_err("gennvm: BB data is out of bounds.\n"); |
@@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | |||
84 | } | 89 | } |
85 | 90 | ||
86 | list_move_tail(&blk->list, &lun->bb_list); | 91 | list_move_tail(&blk->list, &lun->bb_list); |
92 | lun->vlun.nr_bad_blocks++; | ||
87 | } | 93 | } |
88 | 94 | ||
89 | return 0; | 95 | return 0; |
@@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) | |||
136 | list_move_tail(&blk->list, &lun->used_list); | 142 | list_move_tail(&blk->list, &lun->used_list); |
137 | blk->type = 1; | 143 | blk->type = 1; |
138 | lun->vlun.nr_free_blocks--; | 144 | lun->vlun.nr_free_blocks--; |
145 | lun->vlun.nr_inuse_blocks++; | ||
139 | } | 146 | } |
140 | } | 147 | } |
141 | 148 | ||
@@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
164 | block->id = cur_block_id++; | 171 | block->id = cur_block_id++; |
165 | 172 | ||
166 | /* First block is reserved for device */ | 173 | /* First block is reserved for device */ |
167 | if (unlikely(lun_iter == 0 && blk_iter == 0)) | 174 | if (unlikely(lun_iter == 0 && blk_iter == 0)) { |
175 | lun->vlun.nr_free_blocks--; | ||
168 | continue; | 176 | continue; |
177 | } | ||
169 | 178 | ||
170 | list_add_tail(&block->list, &lun->free_list); | 179 | list_add_tail(&block->list, &lun->free_list); |
171 | } | 180 | } |
172 | 181 | ||
173 | if (dev->ops->get_bb_tbl) { | 182 | if (dev->ops->get_bb_tbl) { |
174 | ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, | 183 | struct ppa_addr ppa; |
175 | dev->blks_per_lun, gennvm_block_bb, gn); | 184 | |
185 | ppa.ppa = 0; | ||
186 | ppa.g.ch = lun->vlun.chnl_id; | ||
187 | ppa.g.lun = lun->vlun.id; | ||
188 | ppa = generic_to_dev_addr(dev, ppa); | ||
189 | |||
190 | ret = dev->ops->get_bb_tbl(dev->q, ppa, | ||
191 | dev->blks_per_lun, | ||
192 | gennvm_block_bb, gn); | ||
176 | if (ret) | 193 | if (ret) |
177 | pr_err("gennvm: could not read BB table\n"); | 194 | pr_err("gennvm: could not read BB table\n"); |
178 | } | 195 | } |
@@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev) | |||
199 | if (!gn) | 216 | if (!gn) |
200 | return -ENOMEM; | 217 | return -ENOMEM; |
201 | 218 | ||
219 | gn->dev = dev; | ||
202 | gn->nr_luns = dev->nr_luns; | 220 | gn->nr_luns = dev->nr_luns; |
203 | dev->mp = gn; | 221 | dev->mp = gn; |
204 | 222 | ||
@@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, | |||
254 | blk->type = 1; | 272 | blk->type = 1; |
255 | 273 | ||
256 | lun->vlun.nr_free_blocks--; | 274 | lun->vlun.nr_free_blocks--; |
275 | lun->vlun.nr_inuse_blocks++; | ||
257 | 276 | ||
258 | spin_unlock(&vlun->lock); | 277 | spin_unlock(&vlun->lock); |
259 | out: | 278 | out: |
@@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
271 | case 1: | 290 | case 1: |
272 | list_move_tail(&blk->list, &lun->free_list); | 291 | list_move_tail(&blk->list, &lun->free_list); |
273 | lun->vlun.nr_free_blocks++; | 292 | lun->vlun.nr_free_blocks++; |
293 | lun->vlun.nr_inuse_blocks--; | ||
274 | blk->type = 0; | 294 | blk->type = 0; |
275 | break; | 295 | break; |
276 | case 2: | 296 | case 2: |
277 | list_move_tail(&blk->list, &lun->bb_list); | 297 | list_move_tail(&blk->list, &lun->bb_list); |
298 | lun->vlun.nr_bad_blocks++; | ||
299 | lun->vlun.nr_inuse_blocks--; | ||
278 | break; | 300 | break; |
279 | default: | 301 | default: |
280 | WARN_ON_ONCE(1); | 302 | WARN_ON_ONCE(1); |
281 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", | 303 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", |
282 | blk->id, blk->type); | 304 | blk->id, blk->type); |
283 | list_move_tail(&blk->list, &lun->bb_list); | 305 | list_move_tail(&blk->list, &lun->bb_list); |
306 | lun->vlun.nr_bad_blocks++; | ||
307 | lun->vlun.nr_inuse_blocks--; | ||
284 | } | 308 | } |
285 | 309 | ||
286 | spin_unlock(&vlun->lock); | 310 | spin_unlock(&vlun->lock); |
@@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
292 | 316 | ||
293 | if (rqd->nr_pages > 1) { | 317 | if (rqd->nr_pages > 1) { |
294 | for (i = 0; i < rqd->nr_pages; i++) | 318 | for (i = 0; i < rqd->nr_pages; i++) |
295 | rqd->ppa_list[i] = addr_to_generic_mode(dev, | 319 | rqd->ppa_list[i] = dev_to_generic_addr(dev, |
296 | rqd->ppa_list[i]); | 320 | rqd->ppa_list[i]); |
297 | } else { | 321 | } else { |
298 | rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); | 322 | rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); |
299 | } | 323 | } |
300 | } | 324 | } |
301 | 325 | ||
@@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
305 | 329 | ||
306 | if (rqd->nr_pages > 1) { | 330 | if (rqd->nr_pages > 1) { |
307 | for (i = 0; i < rqd->nr_pages; i++) | 331 | for (i = 0; i < rqd->nr_pages; i++) |
308 | rqd->ppa_list[i] = generic_to_addr_mode(dev, | 332 | rqd->ppa_list[i] = generic_to_dev_addr(dev, |
309 | rqd->ppa_list[i]); | 333 | rqd->ppa_list[i]); |
310 | } else { | 334 | } else { |
311 | rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); | 335 | rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); |
312 | } | 336 | } |
313 | } | 337 | } |
314 | 338 | ||
@@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
354 | { | 378 | { |
355 | int i; | 379 | int i; |
356 | 380 | ||
357 | if (!dev->ops->set_bb) | 381 | if (!dev->ops->set_bb_tbl) |
358 | return; | 382 | return; |
359 | 383 | ||
360 | if (dev->ops->set_bb(dev->q, rqd, 1)) | 384 | if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) |
361 | return; | 385 | return; |
362 | 386 | ||
363 | gennvm_addr_to_generic_mode(dev, rqd); | 387 | gennvm_addr_to_generic_mode(dev, rqd); |
@@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) | |||
440 | return &gn->luns[lunid].vlun; | 464 | return &gn->luns[lunid].vlun; |
441 | } | 465 | } |
442 | 466 | ||
443 | static void gennvm_free_blocks_print(struct nvm_dev *dev) | 467 | static void gennvm_lun_info_print(struct nvm_dev *dev) |
444 | { | 468 | { |
445 | struct gen_nvm *gn = dev->mp; | 469 | struct gen_nvm *gn = dev->mp; |
446 | struct gen_lun *lun; | 470 | struct gen_lun *lun; |
447 | unsigned int i; | 471 | unsigned int i; |
448 | 472 | ||
449 | gennvm_for_each_lun(gn, lun, i) | 473 | |
450 | pr_info("%s: lun%8u\t%u\n", | 474 | gennvm_for_each_lun(gn, lun, i) { |
451 | dev->name, i, lun->vlun.nr_free_blocks); | 475 | spin_lock(&lun->vlun.lock); |
476 | |||
477 | pr_info("%s: lun%8u\t%u\t%u\t%u\n", | ||
478 | dev->name, i, | ||
479 | lun->vlun.nr_free_blocks, | ||
480 | lun->vlun.nr_inuse_blocks, | ||
481 | lun->vlun.nr_bad_blocks); | ||
482 | |||
483 | spin_unlock(&lun->vlun.lock); | ||
484 | } | ||
452 | } | 485 | } |
453 | 486 | ||
454 | static struct nvmm_type gennvm = { | 487 | static struct nvmm_type gennvm = { |
@@ -466,7 +499,7 @@ static struct nvmm_type gennvm = { | |||
466 | .erase_blk = gennvm_erase_blk, | 499 | .erase_blk = gennvm_erase_blk, |
467 | 500 | ||
468 | .get_lun = gennvm_get_lun, | 501 | .get_lun = gennvm_get_lun, |
469 | .free_blocks_print = gennvm_free_blocks_print, | 502 | .lun_info_print = gennvm_lun_info_print, |
470 | }; | 503 | }; |
471 | 504 | ||
472 | static int __init gennvm_module_init(void) | 505 | static int __init gennvm_module_init(void) |
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h | |||
@@ -35,6 +35,8 @@ struct gen_lun { | |||
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct gen_nvm { | 37 | struct gen_nvm { |
38 | struct nvm_dev *dev; | ||
39 | |||
38 | int nr_luns; | 40 | int nr_luns; |
39 | struct gen_lun *luns; | 41 | struct gen_lun *luns; |
40 | }; | 42 | }; |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) | |||
123 | return blk->id * rrpc->dev->pgs_per_blk; | 123 | return blk->id * rrpc->dev->pgs_per_blk; |
124 | } | 124 | } |
125 | 125 | ||
126 | static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, | ||
127 | struct ppa_addr r) | ||
128 | { | ||
129 | struct ppa_addr l; | ||
130 | int secs, pgs, blks, luns; | ||
131 | sector_t ppa = r.ppa; | ||
132 | |||
133 | l.ppa = 0; | ||
134 | |||
135 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | ||
136 | l.g.sec = secs; | ||
137 | |||
138 | sector_div(ppa, dev->sec_per_pg); | ||
139 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
140 | l.g.pg = pgs; | ||
141 | |||
142 | sector_div(ppa, dev->pgs_per_blk); | ||
143 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
144 | l.g.blk = blks; | ||
145 | |||
146 | sector_div(ppa, dev->blks_per_lun); | ||
147 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
148 | l.g.lun = luns; | ||
149 | |||
150 | sector_div(ppa, dev->luns_per_chnl); | ||
151 | l.g.ch = ppa; | ||
152 | |||
153 | return l; | ||
154 | } | ||
155 | |||
126 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) | 156 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) |
127 | { | 157 | { |
128 | struct ppa_addr paddr; | 158 | struct ppa_addr paddr; |
129 | 159 | ||
130 | paddr.ppa = addr; | 160 | paddr.ppa = addr; |
131 | return __linear_to_generic_addr(dev, paddr); | 161 | return linear_to_generic_addr(dev, paddr); |
132 | } | 162 | } |
133 | 163 | ||
134 | /* requires lun->lock taken */ | 164 | /* requires lun->lock taken */ |
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..9202d1a468d0 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl { | |||
93 | __le16 cdw14[6]; | 93 | __le16 cdw14[6]; |
94 | }; | 94 | }; |
95 | 95 | ||
96 | struct nvme_nvm_bbtbl { | 96 | struct nvme_nvm_getbbtbl { |
97 | __u8 opcode; | 97 | __u8 opcode; |
98 | __u8 flags; | 98 | __u8 flags; |
99 | __u16 command_id; | 99 | __u16 command_id; |
@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl { | |||
101 | __u64 rsvd[2]; | 101 | __u64 rsvd[2]; |
102 | __le64 prp1; | 102 | __le64 prp1; |
103 | __le64 prp2; | 103 | __le64 prp2; |
104 | __le32 prp1_len; | 104 | __le64 spba; |
105 | __le32 prp2_len; | 105 | __u32 rsvd4[4]; |
106 | __le32 lbb; | 106 | }; |
107 | __u32 rsvd11[3]; | 107 | |
108 | struct nvme_nvm_setbbtbl { | ||
109 | __u8 opcode; | ||
110 | __u8 flags; | ||
111 | __u16 command_id; | ||
112 | __le32 nsid; | ||
113 | __le64 rsvd[2]; | ||
114 | __le64 prp1; | ||
115 | __le64 prp2; | ||
116 | __le64 spba; | ||
117 | __le16 nlb; | ||
118 | __u8 value; | ||
119 | __u8 rsvd3; | ||
120 | __u32 rsvd4[3]; | ||
108 | }; | 121 | }; |
109 | 122 | ||
110 | struct nvme_nvm_erase_blk { | 123 | struct nvme_nvm_erase_blk { |
@@ -129,8 +142,8 @@ struct nvme_nvm_command { | |||
129 | struct nvme_nvm_hb_rw hb_rw; | 142 | struct nvme_nvm_hb_rw hb_rw; |
130 | struct nvme_nvm_ph_rw ph_rw; | 143 | struct nvme_nvm_ph_rw ph_rw; |
131 | struct nvme_nvm_l2ptbl l2p; | 144 | struct nvme_nvm_l2ptbl l2p; |
132 | struct nvme_nvm_bbtbl get_bb; | 145 | struct nvme_nvm_getbbtbl get_bb; |
133 | struct nvme_nvm_bbtbl set_bb; | 146 | struct nvme_nvm_setbbtbl set_bb; |
134 | struct nvme_nvm_erase_blk erase; | 147 | struct nvme_nvm_erase_blk erase; |
135 | }; | 148 | }; |
136 | }; | 149 | }; |
@@ -142,11 +155,13 @@ struct nvme_nvm_id_group { | |||
142 | __u8 num_ch; | 155 | __u8 num_ch; |
143 | __u8 num_lun; | 156 | __u8 num_lun; |
144 | __u8 num_pln; | 157 | __u8 num_pln; |
158 | __u8 rsvd1; | ||
145 | __le16 num_blk; | 159 | __le16 num_blk; |
146 | __le16 num_pg; | 160 | __le16 num_pg; |
147 | __le16 fpg_sz; | 161 | __le16 fpg_sz; |
148 | __le16 csecs; | 162 | __le16 csecs; |
149 | __le16 sos; | 163 | __le16 sos; |
164 | __le16 rsvd2; | ||
150 | __le32 trdt; | 165 | __le32 trdt; |
151 | __le32 trdm; | 166 | __le32 trdm; |
152 | __le32 tprt; | 167 | __le32 tprt; |
@@ -154,8 +169,9 @@ struct nvme_nvm_id_group { | |||
154 | __le32 tbet; | 169 | __le32 tbet; |
155 | __le32 tbem; | 170 | __le32 tbem; |
156 | __le32 mpos; | 171 | __le32 mpos; |
172 | __le32 mccap; | ||
157 | __le16 cpar; | 173 | __le16 cpar; |
158 | __u8 reserved[913]; | 174 | __u8 reserved[906]; |
159 | } __packed; | 175 | } __packed; |
160 | 176 | ||
161 | struct nvme_nvm_addr_format { | 177 | struct nvme_nvm_addr_format { |
@@ -178,15 +194,28 @@ struct nvme_nvm_id { | |||
178 | __u8 ver_id; | 194 | __u8 ver_id; |
179 | __u8 vmnt; | 195 | __u8 vmnt; |
180 | __u8 cgrps; | 196 | __u8 cgrps; |
181 | __u8 res[5]; | 197 | __u8 res; |
182 | __le32 cap; | 198 | __le32 cap; |
183 | __le32 dom; | 199 | __le32 dom; |
184 | struct nvme_nvm_addr_format ppaf; | 200 | struct nvme_nvm_addr_format ppaf; |
185 | __u8 ppat; | 201 | __u8 resv[228]; |
186 | __u8 resv[223]; | ||
187 | struct nvme_nvm_id_group groups[4]; | 202 | struct nvme_nvm_id_group groups[4]; |
188 | } __packed; | 203 | } __packed; |
189 | 204 | ||
205 | struct nvme_nvm_bb_tbl { | ||
206 | __u8 tblid[4]; | ||
207 | __le16 verid; | ||
208 | __le16 revid; | ||
209 | __le32 rvsd1; | ||
210 | __le32 tblks; | ||
211 | __le32 tfact; | ||
212 | __le32 tgrown; | ||
213 | __le32 tdresv; | ||
214 | __le32 thresv; | ||
215 | __le32 rsvd2[8]; | ||
216 | __u8 blk[0]; | ||
217 | }; | ||
218 | |||
190 | /* | 219 | /* |
191 | * Check we didn't inadvertently grow the command struct | 220 | * Check we didn't inadvertently grow the command struct |
192 | */ | 221 | */ |
@@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void) | |||
195 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); | 224 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); |
196 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); | 225 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); |
197 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); | 226 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); |
198 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); | 227 | BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); |
228 | BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); | ||
199 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); | 229 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); |
200 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); | 230 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); |
201 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); | 231 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); |
202 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); | 232 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); |
203 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); | 233 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); |
234 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); | ||
204 | } | 235 | } |
205 | 236 | ||
206 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | 237 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) |
@@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
234 | dst->tbet = le32_to_cpu(src->tbet); | 265 | dst->tbet = le32_to_cpu(src->tbet); |
235 | dst->tbem = le32_to_cpu(src->tbem); | 266 | dst->tbem = le32_to_cpu(src->tbem); |
236 | dst->mpos = le32_to_cpu(src->mpos); | 267 | dst->mpos = le32_to_cpu(src->mpos); |
268 | dst->mccap = le32_to_cpu(src->mccap); | ||
237 | 269 | ||
238 | dst->cpar = le16_to_cpu(src->cpar); | 270 | dst->cpar = le16_to_cpu(src->cpar); |
239 | } | 271 | } |
@@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
244 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | 276 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) |
245 | { | 277 | { |
246 | struct nvme_ns *ns = q->queuedata; | 278 | struct nvme_ns *ns = q->queuedata; |
279 | struct nvme_dev *dev = ns->dev; | ||
247 | struct nvme_nvm_id *nvme_nvm_id; | 280 | struct nvme_nvm_id *nvme_nvm_id; |
248 | struct nvme_nvm_command c = {}; | 281 | struct nvme_nvm_command c = {}; |
249 | int ret; | 282 | int ret; |
@@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
256 | if (!nvme_nvm_id) | 289 | if (!nvme_nvm_id) |
257 | return -ENOMEM; | 290 | return -ENOMEM; |
258 | 291 | ||
259 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, | 292 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
260 | sizeof(struct nvme_nvm_id)); | 293 | nvme_nvm_id, sizeof(struct nvme_nvm_id)); |
261 | if (ret) { | 294 | if (ret) { |
262 | ret = -EIO; | 295 | ret = -EIO; |
263 | goto out; | 296 | goto out; |
@@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
268 | nvm_id->cgrps = nvme_nvm_id->cgrps; | 301 | nvm_id->cgrps = nvme_nvm_id->cgrps; |
269 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); | 302 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); |
270 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); | 303 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); |
304 | memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, | ||
305 | sizeof(struct nvme_nvm_addr_format)); | ||
271 | 306 | ||
272 | ret = init_grps(nvm_id, nvme_nvm_id); | 307 | ret = init_grps(nvm_id, nvme_nvm_id); |
273 | out: | 308 | out: |
@@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
281 | struct nvme_ns *ns = q->queuedata; | 316 | struct nvme_ns *ns = q->queuedata; |
282 | struct nvme_dev *dev = ns->dev; | 317 | struct nvme_dev *dev = ns->dev; |
283 | struct nvme_nvm_command c = {}; | 318 | struct nvme_nvm_command c = {}; |
284 | u32 len = queue_max_hw_sectors(q) << 9; | 319 | u32 len = queue_max_hw_sectors(dev->admin_q) << 9; |
285 | u32 nlb_pr_rq = len / sizeof(u64); | 320 | u32 nlb_pr_rq = len / sizeof(u64); |
286 | u64 cmd_slba = slba; | 321 | u64 cmd_slba = slba; |
287 | void *entries; | 322 | void *entries; |
@@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
299 | c.l2p.slba = cpu_to_le64(cmd_slba); | 334 | c.l2p.slba = cpu_to_le64(cmd_slba); |
300 | c.l2p.nlb = cpu_to_le32(cmd_nlb); | 335 | c.l2p.nlb = cpu_to_le32(cmd_nlb); |
301 | 336 | ||
302 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, | 337 | ret = nvme_submit_sync_cmd(dev->admin_q, |
303 | entries, len); | 338 | (struct nvme_command *)&c, entries, len); |
304 | if (ret) { | 339 | if (ret) { |
305 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", | 340 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", |
306 | ret); | 341 | ret); |
@@ -322,43 +357,82 @@ out: | |||
322 | return ret; | 357 | return ret; |
323 | } | 358 | } |
324 | 359 | ||
325 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, | 360 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, |
326 | unsigned int nr_blocks, | 361 | int nr_blocks, nvm_bb_update_fn *update_bbtbl, |
327 | nvm_bb_update_fn *update_bbtbl, void *priv) | 362 | void *priv) |
328 | { | 363 | { |
329 | struct nvme_ns *ns = q->queuedata; | 364 | struct nvme_ns *ns = q->queuedata; |
330 | struct nvme_dev *dev = ns->dev; | 365 | struct nvme_dev *dev = ns->dev; |
331 | struct nvme_nvm_command c = {}; | 366 | struct nvme_nvm_command c = {}; |
332 | void *bb_bitmap; | 367 | struct nvme_nvm_bb_tbl *bb_tbl; |
333 | u16 bb_bitmap_size; | 368 | int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; |
334 | int ret = 0; | 369 | int ret = 0; |
335 | 370 | ||
336 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; | 371 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; |
337 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); | 372 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); |
338 | c.get_bb.lbb = cpu_to_le32(lunid); | 373 | c.get_bb.spba = cpu_to_le64(ppa.ppa); |
339 | bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; | ||
340 | bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); | ||
341 | if (!bb_bitmap) | ||
342 | return -ENOMEM; | ||
343 | 374 | ||
344 | bitmap_zero(bb_bitmap, nr_blocks); | 375 | bb_tbl = kzalloc(tblsz, GFP_KERNEL); |
376 | if (!bb_tbl) | ||
377 | return -ENOMEM; | ||
345 | 378 | ||
346 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, | 379 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
347 | bb_bitmap_size); | 380 | bb_tbl, tblsz); |
348 | if (ret) { | 381 | if (ret) { |
349 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); | 382 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); |
350 | ret = -EIO; | 383 | ret = -EIO; |
351 | goto out; | 384 | goto out; |
352 | } | 385 | } |
353 | 386 | ||
354 | ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); | 387 | if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' || |
388 | bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { | ||
389 | dev_err(dev->dev, "bbt format mismatch\n"); | ||
390 | ret = -EINVAL; | ||
391 | goto out; | ||
392 | } | ||
393 | |||
394 | if (le16_to_cpu(bb_tbl->verid) != 1) { | ||
395 | ret = -EINVAL; | ||
396 | dev_err(dev->dev, "bbt version not supported\n"); | ||
397 | goto out; | ||
398 | } | ||
399 | |||
400 | if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { | ||
401 | ret = -EINVAL; | ||
402 | dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)", | ||
403 | le32_to_cpu(bb_tbl->tblks), nr_blocks); | ||
404 | goto out; | ||
405 | } | ||
406 | |||
407 | ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); | ||
355 | if (ret) { | 408 | if (ret) { |
356 | ret = -EINTR; | 409 | ret = -EINTR; |
357 | goto out; | 410 | goto out; |
358 | } | 411 | } |
359 | 412 | ||
360 | out: | 413 | out: |
361 | kfree(bb_bitmap); | 414 | kfree(bb_tbl); |
415 | return ret; | ||
416 | } | ||
417 | |||
418 | static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, | ||
419 | int type) | ||
420 | { | ||
421 | struct nvme_ns *ns = q->queuedata; | ||
422 | struct nvme_dev *dev = ns->dev; | ||
423 | struct nvme_nvm_command c = {}; | ||
424 | int ret = 0; | ||
425 | |||
426 | c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; | ||
427 | c.set_bb.nsid = cpu_to_le32(ns->ns_id); | ||
428 | c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); | ||
429 | c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); | ||
430 | c.set_bb.value = type; | ||
431 | |||
432 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, | ||
433 | NULL, 0); | ||
434 | if (ret) | ||
435 | dev_err(dev->dev, "set bad block table failed (%d)\n", ret); | ||
362 | return ret; | 436 | return ret; |
363 | } | 437 | } |
364 | 438 | ||
@@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { | |||
474 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, | 548 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, |
475 | 549 | ||
476 | .get_bb_tbl = nvme_nvm_get_bb_tbl, | 550 | .get_bb_tbl = nvme_nvm_get_bb_tbl, |
551 | .set_bb_tbl = nvme_nvm_set_bb_tbl, | ||
477 | 552 | ||
478 | .submit_io = nvme_nvm_submit_io, | 553 | .submit_io = nvme_nvm_submit_io, |
479 | .erase_block = nvme_nvm_erase_block, | 554 | .erase_block = nvme_nvm_erase_block, |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..930042fa2d69 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -968,7 +968,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) | |||
968 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) | 968 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) |
969 | return; | 969 | return; |
970 | 970 | ||
971 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | 971 | if (likely(nvmeq->cq_vector >= 0)) |
972 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | ||
972 | nvmeq->cq_head = head; | 973 | nvmeq->cq_head = head; |
973 | nvmeq->cq_phase = phase; | 974 | nvmeq->cq_phase = phase; |
974 | 975 | ||
@@ -2268,7 +2269,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) | |||
2268 | if (dev->max_hw_sectors) { | 2269 | if (dev->max_hw_sectors) { |
2269 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); | 2270 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); |
2270 | blk_queue_max_segments(ns->queue, | 2271 | blk_queue_max_segments(ns->queue, |
2271 | ((dev->max_hw_sectors << 9) / dev->page_size) + 1); | 2272 | (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); |
2272 | } | 2273 | } |
2273 | if (dev->stripe_size) | 2274 | if (dev->stripe_size) |
2274 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); | 2275 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); |
@@ -2787,6 +2788,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) | |||
2787 | { | 2788 | { |
2788 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; | 2789 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; |
2789 | nvme_put_dq(dq); | 2790 | nvme_put_dq(dq); |
2791 | |||
2792 | spin_lock_irq(&nvmeq->q_lock); | ||
2793 | nvme_process_cq(nvmeq); | ||
2794 | spin_unlock_irq(&nvmeq->q_lock); | ||
2790 | } | 2795 | } |
2791 | 2796 | ||
2792 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, | 2797 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 69c9057e1ab8..3db5552b17d5 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
@@ -58,7 +58,6 @@ enum { | |||
58 | struct nvm_id_group { | 58 | struct nvm_id_group { |
59 | u8 mtype; | 59 | u8 mtype; |
60 | u8 fmtype; | 60 | u8 fmtype; |
61 | u16 res16; | ||
62 | u8 num_ch; | 61 | u8 num_ch; |
63 | u8 num_lun; | 62 | u8 num_lun; |
64 | u8 num_pln; | 63 | u8 num_pln; |
@@ -74,9 +73,9 @@ struct nvm_id_group { | |||
74 | u32 tbet; | 73 | u32 tbet; |
75 | u32 tbem; | 74 | u32 tbem; |
76 | u32 mpos; | 75 | u32 mpos; |
76 | u32 mccap; | ||
77 | u16 cpar; | 77 | u16 cpar; |
78 | u8 res[913]; | 78 | }; |
79 | } __packed; | ||
80 | 79 | ||
81 | struct nvm_addr_format { | 80 | struct nvm_addr_format { |
82 | u8 ch_offset; | 81 | u8 ch_offset; |
@@ -91,19 +90,15 @@ struct nvm_addr_format { | |||
91 | u8 pg_len; | 90 | u8 pg_len; |
92 | u8 sect_offset; | 91 | u8 sect_offset; |
93 | u8 sect_len; | 92 | u8 sect_len; |
94 | u8 res[4]; | ||
95 | }; | 93 | }; |
96 | 94 | ||
97 | struct nvm_id { | 95 | struct nvm_id { |
98 | u8 ver_id; | 96 | u8 ver_id; |
99 | u8 vmnt; | 97 | u8 vmnt; |
100 | u8 cgrps; | 98 | u8 cgrps; |
101 | u8 res[5]; | ||
102 | u32 cap; | 99 | u32 cap; |
103 | u32 dom; | 100 | u32 dom; |
104 | struct nvm_addr_format ppaf; | 101 | struct nvm_addr_format ppaf; |
105 | u8 ppat; | ||
106 | u8 resv[224]; | ||
107 | struct nvm_id_group groups[4]; | 102 | struct nvm_id_group groups[4]; |
108 | } __packed; | 103 | } __packed; |
109 | 104 | ||
@@ -123,39 +118,28 @@ struct nvm_tgt_instance { | |||
123 | #define NVM_VERSION_MINOR 0 | 118 | #define NVM_VERSION_MINOR 0 |
124 | #define NVM_VERSION_PATCH 0 | 119 | #define NVM_VERSION_PATCH 0 |
125 | 120 | ||
126 | #define NVM_SEC_BITS (8) | ||
127 | #define NVM_PL_BITS (6) | ||
128 | #define NVM_PG_BITS (16) | ||
129 | #define NVM_BLK_BITS (16) | 121 | #define NVM_BLK_BITS (16) |
130 | #define NVM_LUN_BITS (10) | 122 | #define NVM_PG_BITS (16) |
123 | #define NVM_SEC_BITS (8) | ||
124 | #define NVM_PL_BITS (8) | ||
125 | #define NVM_LUN_BITS (8) | ||
131 | #define NVM_CH_BITS (8) | 126 | #define NVM_CH_BITS (8) |
132 | 127 | ||
133 | struct ppa_addr { | 128 | struct ppa_addr { |
129 | /* Generic structure for all addresses */ | ||
134 | union { | 130 | union { |
135 | /* Channel-based PPA format in nand 4x2x2x2x8x10 */ | ||
136 | struct { | ||
137 | u64 ch : 4; | ||
138 | u64 sec : 2; /* 4 sectors per page */ | ||
139 | u64 pl : 2; /* 4 planes per LUN */ | ||
140 | u64 lun : 2; /* 4 LUNs per channel */ | ||
141 | u64 pg : 8; /* 256 pages per block */ | ||
142 | u64 blk : 10;/* 1024 blocks per plane */ | ||
143 | u64 resved : 36; | ||
144 | } chnl; | ||
145 | |||
146 | /* Generic structure for all addresses */ | ||
147 | struct { | 131 | struct { |
132 | u64 blk : NVM_BLK_BITS; | ||
133 | u64 pg : NVM_PG_BITS; | ||
148 | u64 sec : NVM_SEC_BITS; | 134 | u64 sec : NVM_SEC_BITS; |
149 | u64 pl : NVM_PL_BITS; | 135 | u64 pl : NVM_PL_BITS; |
150 | u64 pg : NVM_PG_BITS; | ||
151 | u64 blk : NVM_BLK_BITS; | ||
152 | u64 lun : NVM_LUN_BITS; | 136 | u64 lun : NVM_LUN_BITS; |
153 | u64 ch : NVM_CH_BITS; | 137 | u64 ch : NVM_CH_BITS; |
154 | } g; | 138 | } g; |
155 | 139 | ||
156 | u64 ppa; | 140 | u64 ppa; |
157 | }; | 141 | }; |
158 | } __packed; | 142 | }; |
159 | 143 | ||
160 | struct nvm_rq { | 144 | struct nvm_rq { |
161 | struct nvm_tgt_instance *ins; | 145 | struct nvm_tgt_instance *ins; |
@@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |||
191 | struct nvm_block; | 175 | struct nvm_block; |
192 | 176 | ||
193 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | 177 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); |
194 | typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); | 178 | typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); |
195 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); | 179 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); |
196 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, | 180 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, |
197 | nvm_l2p_update_fn *, void *); | 181 | nvm_l2p_update_fn *, void *); |
198 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, | 182 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, |
199 | nvm_bb_update_fn *, void *); | 183 | nvm_bb_update_fn *, void *); |
200 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); | 184 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); |
201 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); | 185 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); |
@@ -210,7 +194,7 @@ struct nvm_dev_ops { | |||
210 | nvm_id_fn *identity; | 194 | nvm_id_fn *identity; |
211 | nvm_get_l2p_tbl_fn *get_l2p_tbl; | 195 | nvm_get_l2p_tbl_fn *get_l2p_tbl; |
212 | nvm_op_bb_tbl_fn *get_bb_tbl; | 196 | nvm_op_bb_tbl_fn *get_bb_tbl; |
213 | nvm_op_set_bb_fn *set_bb; | 197 | nvm_op_set_bb_fn *set_bb_tbl; |
214 | 198 | ||
215 | nvm_submit_io_fn *submit_io; | 199 | nvm_submit_io_fn *submit_io; |
216 | nvm_erase_blk_fn *erase_block; | 200 | nvm_erase_blk_fn *erase_block; |
@@ -220,7 +204,7 @@ struct nvm_dev_ops { | |||
220 | nvm_dev_dma_alloc_fn *dev_dma_alloc; | 204 | nvm_dev_dma_alloc_fn *dev_dma_alloc; |
221 | nvm_dev_dma_free_fn *dev_dma_free; | 205 | nvm_dev_dma_free_fn *dev_dma_free; |
222 | 206 | ||
223 | uint8_t max_phys_sect; | 207 | unsigned int max_phys_sect; |
224 | }; | 208 | }; |
225 | 209 | ||
226 | struct nvm_lun { | 210 | struct nvm_lun { |
@@ -229,7 +213,9 @@ struct nvm_lun { | |||
229 | int lun_id; | 213 | int lun_id; |
230 | int chnl_id; | 214 | int chnl_id; |
231 | 215 | ||
216 | unsigned int nr_inuse_blocks; /* Number of used blocks */ | ||
232 | unsigned int nr_free_blocks; /* Number of unused blocks */ | 217 | unsigned int nr_free_blocks; /* Number of unused blocks */ |
218 | unsigned int nr_bad_blocks; /* Number of bad blocks */ | ||
233 | struct nvm_block *blocks; | 219 | struct nvm_block *blocks; |
234 | 220 | ||
235 | spinlock_t lock; | 221 | spinlock_t lock; |
@@ -263,8 +249,7 @@ struct nvm_dev { | |||
263 | int blks_per_lun; | 249 | int blks_per_lun; |
264 | int sec_size; | 250 | int sec_size; |
265 | int oob_size; | 251 | int oob_size; |
266 | int addr_mode; | 252 | struct nvm_addr_format ppaf; |
267 | struct nvm_addr_format addr_format; | ||
268 | 253 | ||
269 | /* Calculated/Cached values. These do not reflect the actual usable | 254 | /* Calculated/Cached values. These do not reflect the actual usable |
270 | * blocks at run-time. | 255 | * blocks at run-time. |
@@ -290,118 +275,45 @@ struct nvm_dev { | |||
290 | char name[DISK_NAME_LEN]; | 275 | char name[DISK_NAME_LEN]; |
291 | }; | 276 | }; |
292 | 277 | ||
293 | /* fallback conversion */ | 278 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, |
294 | static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, | 279 | struct ppa_addr r) |
295 | struct ppa_addr r) | ||
296 | { | ||
297 | struct ppa_addr l; | ||
298 | |||
299 | l.ppa = r.g.sec + | ||
300 | r.g.pg * dev->sec_per_pg + | ||
301 | r.g.blk * (dev->pgs_per_blk * | ||
302 | dev->sec_per_pg) + | ||
303 | r.g.lun * (dev->blks_per_lun * | ||
304 | dev->pgs_per_blk * | ||
305 | dev->sec_per_pg) + | ||
306 | r.g.ch * (dev->blks_per_lun * | ||
307 | dev->pgs_per_blk * | ||
308 | dev->luns_per_chnl * | ||
309 | dev->sec_per_pg); | ||
310 | |||
311 | return l; | ||
312 | } | ||
313 | |||
314 | /* fallback conversion */ | ||
315 | static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, | ||
316 | struct ppa_addr r) | ||
317 | { | 280 | { |
318 | struct ppa_addr l; | 281 | struct ppa_addr l; |
319 | int secs, pgs, blks, luns; | ||
320 | sector_t ppa = r.ppa; | ||
321 | 282 | ||
322 | l.ppa = 0; | 283 | l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; |
323 | 284 | l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; | |
324 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | 285 | l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; |
325 | l.g.sec = secs; | 286 | l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; |
326 | 287 | l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; | |
327 | sector_div(ppa, dev->sec_per_pg); | 288 | l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; |
328 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
329 | l.g.pg = pgs; | ||
330 | |||
331 | sector_div(ppa, dev->pgs_per_blk); | ||
332 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
333 | l.g.blk = blks; | ||
334 | |||
335 | sector_div(ppa, dev->blks_per_lun); | ||
336 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
337 | l.g.lun = luns; | ||
338 | |||
339 | sector_div(ppa, dev->luns_per_chnl); | ||
340 | l.g.ch = ppa; | ||
341 | 289 | ||
342 | return l; | 290 | return l; |
343 | } | 291 | } |
344 | 292 | ||
345 | static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) | 293 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, |
294 | struct ppa_addr r) | ||
346 | { | 295 | { |
347 | struct ppa_addr l; | 296 | struct ppa_addr l; |
348 | 297 | ||
349 | l.ppa = 0; | 298 | /* |
350 | 299 | * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. | |
351 | l.chnl.sec = r.g.sec; | 300 | */ |
352 | l.chnl.pl = r.g.pl; | 301 | l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & |
353 | l.chnl.pg = r.g.pg; | 302 | (((1 << dev->ppaf.blk_len) - 1)); |
354 | l.chnl.blk = r.g.blk; | 303 | l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & |
355 | l.chnl.lun = r.g.lun; | 304 | (((1 << dev->ppaf.pg_len) - 1)); |
356 | l.chnl.ch = r.g.ch; | 305 | l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & |
357 | 306 | (((1 << dev->ppaf.sect_len) - 1)); | |
358 | return l; | 307 | l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & |
359 | } | 308 | (((1 << dev->ppaf.pln_len) - 1)); |
360 | 309 | l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & | |
361 | static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) | 310 | (((1 << dev->ppaf.lun_len) - 1)); |
362 | { | 311 | l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & |
363 | struct ppa_addr l; | 312 | (((1 << dev->ppaf.ch_len) - 1)); |
364 | |||
365 | l.ppa = 0; | ||
366 | |||
367 | l.g.sec = r.chnl.sec; | ||
368 | l.g.pl = r.chnl.pl; | ||
369 | l.g.pg = r.chnl.pg; | ||
370 | l.g.blk = r.chnl.blk; | ||
371 | l.g.lun = r.chnl.lun; | ||
372 | l.g.ch = r.chnl.ch; | ||
373 | 313 | ||
374 | return l; | 314 | return l; |
375 | } | 315 | } |
376 | 316 | ||
377 | static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, | ||
378 | struct ppa_addr gppa) | ||
379 | { | ||
380 | switch (dev->addr_mode) { | ||
381 | case NVM_ADDRMODE_LINEAR: | ||
382 | return __linear_to_generic_addr(dev, gppa); | ||
383 | case NVM_ADDRMODE_CHANNEL: | ||
384 | return __chnl_to_generic_addr(gppa); | ||
385 | default: | ||
386 | BUG(); | ||
387 | } | ||
388 | return gppa; | ||
389 | } | ||
390 | |||
391 | static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, | ||
392 | struct ppa_addr gppa) | ||
393 | { | ||
394 | switch (dev->addr_mode) { | ||
395 | case NVM_ADDRMODE_LINEAR: | ||
396 | return __generic_to_linear_addr(dev, gppa); | ||
397 | case NVM_ADDRMODE_CHANNEL: | ||
398 | return __generic_to_chnl_addr(gppa); | ||
399 | default: | ||
400 | BUG(); | ||
401 | } | ||
402 | return gppa; | ||
403 | } | ||
404 | |||
405 | static inline int ppa_empty(struct ppa_addr ppa_addr) | 317 | static inline int ppa_empty(struct ppa_addr ppa_addr) |
406 | { | 318 | { |
407 | return (ppa_addr.ppa == ADDR_EMPTY); | 319 | return (ppa_addr.ppa == ADDR_EMPTY); |
@@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); | |||
468 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, | 380 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, |
469 | unsigned long); | 381 | unsigned long); |
470 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | 382 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); |
471 | typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); | 383 | typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); |
472 | 384 | ||
473 | struct nvmm_type { | 385 | struct nvmm_type { |
474 | const char *name; | 386 | const char *name; |
@@ -492,7 +404,7 @@ struct nvmm_type { | |||
492 | nvmm_get_lun_fn *get_lun; | 404 | nvmm_get_lun_fn *get_lun; |
493 | 405 | ||
494 | /* Statistics */ | 406 | /* Statistics */ |
495 | nvmm_free_blocks_print_fn *free_blocks_print; | 407 | nvmm_lun_info_print_fn *lun_info_print; |
496 | struct list_head list; | 408 | struct list_head list; |
497 | }; | 409 | }; |
498 | 410 | ||