author	Jens Axboe <axboe@fb.com>	2017-02-17 16:06:45 -0500
committer	Jens Axboe <axboe@fb.com>	2017-02-17 16:06:45 -0500
commit	6010720da8aab51f33beee63b73cf88016e9b250 (patch)
tree	a4c5a7f645998e86a1f49cb05f8e0c4e51448294 /drivers
parent	2fe1e8a7b2f4dcac3fcb07ff06b0ae7396201fd6 (diff)
parent	8a9ae523282f324989850fcf41312b42a2fb9296 (diff)
Merge branch 'for-4.11/block' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/cciss.c	54
-rw-r--r--	drivers/block/cciss.h	6
-rw-r--r--	drivers/block/floppy.c	2
-rw-r--r--	drivers/block/loop.c	17
-rw-r--r--	drivers/block/null_blk.c	6
-rw-r--r--	drivers/block/paride/pcd.c	2
-rw-r--r--	drivers/cdrom/cdrom.c	58
-rw-r--r--	drivers/cdrom/gdrom.c	12
-rw-r--r--	drivers/ide/ide-cd.c	2
-rw-r--r--	drivers/lightnvm/Kconfig	9
-rw-r--r--	drivers/lightnvm/Makefile	3
-rw-r--r--	drivers/lightnvm/core.c	1027
-rw-r--r--	drivers/lightnvm/gennvm.c	657
-rw-r--r--	drivers/lightnvm/gennvm.h	62
-rw-r--r--	drivers/lightnvm/rrpc.c	7
-rw-r--r--	drivers/lightnvm/rrpc.h	3
-rw-r--r--	drivers/lightnvm/sysblk.c	733
-rw-r--r--	drivers/md/bcache/request.c	2
-rw-r--r--	drivers/md/dm-cache-target.c	13
-rw-r--r--	drivers/md/dm-thin.c	13
-rw-r--r--	drivers/nvme/host/core.c	30
-rw-r--r--	drivers/nvme/host/lightnvm.c	315
-rw-r--r--	drivers/nvme/host/nvme.h	13
-rw-r--r--	drivers/nvme/host/pci.c	15
-rw-r--r--	drivers/scsi/sr.c	2
25 files changed, 1018 insertions(+), 2045 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e5c5b8eb14a9..3a44438a1195 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4074,41 +4074,27 @@ clean_up:
 
 static void cciss_interrupt_mode(ctlr_info_t *h)
 {
-#ifdef CONFIG_PCI_MSI
-	int err;
-	struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
-	{0, 2}, {0, 3}
-	};
+	int ret;
 
 	/* Some boards advertise MSI but don't really support it */
 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
 	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
 		goto default_int_mode;
 
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
-		err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4);
-		if (!err) {
-			h->intr[0] = cciss_msix_entries[0].vector;
-			h->intr[1] = cciss_msix_entries[1].vector;
-			h->intr[2] = cciss_msix_entries[2].vector;
-			h->intr[3] = cciss_msix_entries[3].vector;
-			h->msix_vector = 1;
-			return;
-		} else {
-			dev_warn(&h->pdev->dev,
-				"MSI-X init failed %d\n", err);
-		}
-	}
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
-		if (!pci_enable_msi(h->pdev))
-			h->msi_vector = 1;
-		else
-			dev_warn(&h->pdev->dev, "MSI init failed\n");
+	ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
+	if (ret >= 0) {
+		h->intr[0] = pci_irq_vector(h->pdev, 0);
+		h->intr[1] = pci_irq_vector(h->pdev, 1);
+		h->intr[2] = pci_irq_vector(h->pdev, 2);
+		h->intr[3] = pci_irq_vector(h->pdev, 3);
+		return;
 	}
+
+	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
+
 default_int_mode:
-#endif /* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[h->intr_mode] = h->pdev->irq;
+	h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
 	return;
 }
 
@@ -4888,7 +4874,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 			     irqreturn_t (*msixhandler)(int, void *),
 			     irqreturn_t (*intxhandler)(int, void *))
 {
-	if (h->msix_vector || h->msi_vector) {
+	if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
 				0, h->devname, h))
 			return 0;
@@ -4934,12 +4920,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
 	int ctlr = h->ctlr;
 
 	free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+	pci_free_irq_vectors(h->pdev);
 	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
 	cciss_free_scatterlists(h);
 	cciss_free_cmd_pool(h);
@@ -5295,12 +5276,7 @@ static void cciss_remove_one(struct pci_dev *pdev)
 
 	cciss_shutdown(pdev);
 
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+	pci_free_irq_vectors(h->pdev);
 
 	iounmap(h->transtable);
 	iounmap(h->cfgtable);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 7fda30e4a241..4affa94ca17b 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -90,8 +90,6 @@ struct ctlr_info
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
 	unsigned int intr[4];
-	unsigned int msix_vector;
-	unsigned int msi_vector;
 	int	intr_mode;
 	int	cciss_max_sectors;
 	BYTE	cciss_read;
@@ -333,7 +331,7 @@ static unsigned long SA5_performant_completed(ctlr_info_t *h)
 	 */
 	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 	/* msi auto clears the interrupt pending bit. */
-	if (!(h->msi_vector || h->msix_vector)) {
+	if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
 		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
 		/* Do a read in order to flush the write to the controller
 		 * (as per spec.)
@@ -393,7 +391,7 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
 	if (!register_value)
 		return false;
 
-	if (h->msi_vector || h->msix_vector)
+	if (h->pdev->msi_enabled || h->pdev->msix_enabled)
 		return true;
 
 	/* Read outbound doorbell to flush */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a391a3cfb3fe..184887af4b9f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3119,7 +3119,7 @@ static int raw_cmd_copyin(int cmd, void __user *param,
 	*rcmd = NULL;
 
 loop:
-	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
 	*rcmd = ptr;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f347285c67ec..304377182c1a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
 		return -EINVAL;
 
+	/* I/O need to be drained during transfer transition */
+	blk_mq_freeze_queue(lo->lo_queue);
+
 	err = loop_release_xfer(lo);
 	if (err)
-		return err;
+		goto exit;
 
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
@@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 
 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		return err;
+		goto exit;
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit)
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
-			return -EFBIG;
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+			err = -EFBIG;
+			goto exit;
+		}
 
 	loop_config_discard(lo);
 
@@ -1156,7 +1161,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
 
-	return 0;
+ exit:
+	blk_mq_unfreeze_queue(lo->lo_queue);
+	return err;
 }
 
 static int
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index c0e14e54909b..a67b7ea1e3bf 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -420,7 +420,8 @@ static void null_lnvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
 
-	nvm_end_io(rqd, error);
+	rqd->error = error;
+	nvm_end_io(rqd);
 
 	blk_put_request(rq);
 }
@@ -460,7 +461,6 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 
 	id->ver_id = 0x1;
 	id->vmnt = 0;
-	id->cgrps = 1;
 	id->cap = 0x2;
 	id->dom = 0x1;
 
@@ -479,7 +479,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 
 	sector_div(size, bs); /* convert size to pages */
 	size >>= 8; /* concert size to pgs pr blk */
-	grp = &id->groups[0];
+	grp = &id->grp;
 	grp->mtype = 0;
 	grp->fmtype = 0;
 	grp->num_ch = 1;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 5fd2d0e25567..10aed84244f5 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -273,7 +273,7 @@ static const struct block_device_operations pcd_bdops = {
 	.check_events	= pcd_block_check_events,
 };
 
-static struct cdrom_device_ops pcd_dops = {
+static const struct cdrom_device_ops pcd_dops = {
 	.open		= pcd_open,
 	.release	= pcd_release,
 	.drive_status	= pcd_drive_status,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 59cca72647a6..bbbd3caa927c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -342,8 +342,8 @@ static void cdrom_sysctl_register(void);
 
 static LIST_HEAD(cdrom_list);
 
-static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
-				      struct packet_command *cgc)
+int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
+			       struct packet_command *cgc)
 {
 	if (cgc->sense) {
 		cgc->sense->sense_key = 0x05;
@@ -354,6 +354,7 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
 	cgc->stat = -EIO;
 	return -EIO;
 }
+EXPORT_SYMBOL(cdrom_dummy_generic_packet);
 
 static int cdrom_flush_cache(struct cdrom_device_info *cdi)
 {
@@ -371,7 +372,7 @@ static int cdrom_flush_cache(struct cdrom_device_info *cdi)
 static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
 			       disc_information *di)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct packet_command cgc;
 	int ret, buflen;
 
@@ -586,7 +587,7 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
 int register_cdrom(struct cdrom_device_info *cdi)
 {
 	static char banner_printed;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	int *change_capability = (int *)&cdo->capability; /* hack */
 
 	cd_dbg(CD_OPEN, "entering register_cdrom\n");
@@ -610,7 +611,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
 	ENSURE(reset, CDC_RESET);
 	ENSURE(generic_packet, CDC_GENERIC_PACKET);
 	cdi->mc_flags = 0;
-	cdo->n_minors = 0;
 	cdi->options = CDO_USE_FFLAGS;
 
 	if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
@@ -630,8 +630,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
 	else
 		cdi->cdda_method = CDDA_OLD;
 
-	if (!cdo->generic_packet)
-		cdo->generic_packet = cdrom_dummy_generic_packet;
+	WARN_ON(!cdo->generic_packet);
 
 	cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
 	mutex_lock(&cdrom_mutex);
@@ -652,7 +651,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
 	if (cdi->exit)
 		cdi->exit(cdi);
 
-	cdi->ops->n_minors--;
 	cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
 }
 
@@ -1036,7 +1034,7 @@ static
 int open_for_data(struct cdrom_device_info *cdi)
 {
 	int ret;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	tracktype tracks;
 	cd_dbg(CD_OPEN, "entering open_for_data\n");
 	/* Check if the driver can report drive status. If it can, we
@@ -1198,8 +1196,8 @@ err:
 /* This code is similar to that in open_for_data. The routine is called
    whenever an audio play operation is requested.
 */
-static int check_for_audio_disc(struct cdrom_device_info * cdi,
-				struct cdrom_device_ops * cdo)
+static int check_for_audio_disc(struct cdrom_device_info *cdi,
+				const struct cdrom_device_ops *cdo)
 {
 	int ret;
 	tracktype tracks;
@@ -1254,7 +1252,7 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
 
 void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	int opened_for_data;
 
 	cd_dbg(CD_CLOSE, "entering cdrom_release\n");
@@ -1294,7 +1292,7 @@ static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
 				  struct cdrom_changer_info *buf)
 {
 	struct packet_command cgc;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	int length;
 
 	/*
@@ -1643,7 +1641,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
 	int ret;
 	u_char buf[20];
 	struct packet_command cgc;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	rpc_state_t rpc_state;
 
 	memset(buf, 0, sizeof(buf));
@@ -1791,7 +1789,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
 {
 	unsigned char buf[21], *base;
 	struct dvd_layer *layer;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	int ret, layer_num = s->physical.layer_num;
 
 	if (layer_num >= DVD_LAYERS)
@@ -1842,7 +1840,7 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
 {
 	int ret;
 	u_char buf[8];
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
 	cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
@@ -1866,7 +1864,7 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
 {
 	int ret, size;
 	u_char *buf;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	size = sizeof(s->disckey.value) + 4;
 
@@ -1894,7 +1892,7 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
 {
 	int ret, size = 4 + 188;
 	u_char *buf;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
@@ -1928,7 +1926,7 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
 {
 	int ret = 0, size;
 	u_char *buf;
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	size = sizeof(s->manufact.value) + 4;
 
@@ -1995,7 +1993,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi,
 		     struct packet_command *cgc,
 		     int page_code, int page_control)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	memset(cgc->cmd, 0, sizeof(cgc->cmd));
 
@@ -2010,7 +2008,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi,
 int cdrom_mode_select(struct cdrom_device_info *cdi,
 		      struct packet_command *cgc)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	memset(cgc->cmd, 0, sizeof(cgc->cmd));
 	memset(cgc->buffer, 0, 2);
@@ -2025,7 +2023,7 @@ int cdrom_mode_select(struct cdrom_device_info *cdi,
 static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
 				 struct cdrom_subchnl *subchnl, int mcn)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct packet_command cgc;
 	char buffer[32];
 	int ret;
@@ -2073,7 +2071,7 @@ static int cdrom_read_cd(struct cdrom_device_info *cdi,
 			  struct packet_command *cgc, int lba,
 			  int blocksize, int nblocks)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	memset(&cgc->cmd, 0, sizeof(cgc->cmd));
 	cgc->cmd[0] = GPCMD_READ_10;
@@ -2093,7 +2091,7 @@ static int cdrom_read_block(struct cdrom_device_info *cdi,
 			    struct packet_command *cgc,
 			    int lba, int nblocks, int format, int blksize)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 
 	memset(&cgc->cmd, 0, sizeof(cgc->cmd));
 	cgc->cmd[0] = GPCMD_READ_CD;
@@ -2764,7 +2762,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
  */
 static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct packet_command cgc;
 	struct modesel_head mh;
 
@@ -2790,7 +2788,7 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
 static int cdrom_get_track_info(struct cdrom_device_info *cdi,
 				__u16 track, __u8 type, track_information *ti)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct packet_command cgc;
 	int ret, buflen;
 
@@ -3049,7 +3047,7 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
 					   void __user *arg,
 					   struct packet_command *cgc)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct cdrom_msf msf;
 	cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
 	if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf)))
@@ -3069,7 +3067,7 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
 					   void __user *arg,
 					   struct packet_command *cgc)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	struct cdrom_blk blk;
 	cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
 	if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk)))
@@ -3164,7 +3162,7 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
 						struct packet_command *cgc,
 						int cmd)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
 	cgc->cmd[0] = GPCMD_START_STOP_UNIT;
 	cgc->cmd[1] = 1;
@@ -3177,7 +3175,7 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
 						  struct packet_command *cgc,
 						  int cmd)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	const struct cdrom_device_ops *cdo = cdi->ops;
 	cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
 	cgc->cmd[0] = GPCMD_PAUSE_RESUME;
 	cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 584bc3126403..1afab6558d0c 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -481,7 +481,7 @@ static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
 	return -EINVAL;
 }
 
-static struct cdrom_device_ops gdrom_ops = {
+static const struct cdrom_device_ops gdrom_ops = {
 	.open			= gdrom_open,
 	.release		= gdrom_release,
 	.drive_status		= gdrom_drivestatus,
@@ -489,9 +489,9 @@ static struct cdrom_device_ops gdrom_ops = {
 	.get_last_session	= gdrom_get_last_session,
 	.reset			= gdrom_hardreset,
 	.audio_ioctl		= gdrom_audio_ioctl,
+	.generic_packet		= cdrom_dummy_generic_packet,
 	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
 				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-	.n_minors		= 1,
 };
 
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
@@ -807,16 +807,20 @@ static int probe_gdrom(struct platform_device *devptr)
 	if (err)
 		goto probe_fail_cmdirq_register;
 	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
-	if (!gd.gdrom_rq)
+	if (!gd.gdrom_rq) {
+		err = -ENOMEM;
 		goto probe_fail_requestq;
+	}
 
 	err = probe_gdrom_setupqueue();
 	if (err)
 		goto probe_fail_toc;
 
 	gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
-	if (!gd.toc)
+	if (!gd.toc) {
+		err = -ENOMEM;
 		goto probe_fail_toc;
+	}
 	add_disk(gd.disk);
 	return 0;
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9cbd217bc0c9..ab9232e1e16f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1166,7 +1166,7 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
 	CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \
 	CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM)
 
-static struct cdrom_device_ops ide_cdrom_dops = {
+static const struct cdrom_device_ops ide_cdrom_dops = {
 	.open			= ide_cdrom_open_real,
 	.release		= ide_cdrom_release_real,
 	.drive_status		= ide_cdrom_drive_status,
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2f5d5f4a4c75..052714106b7b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -26,15 +26,6 @@ config NVM_DEBUG
 
 	  It is required to create/remove targets without IOCTLs.
 
-config NVM_GENNVM
-	tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
-	---help---
-	Non-volatile memory media manager for Open-Channel SSDs that implements
-	physical media metadata management and block provisioning API.
-
-	This is the standard media manager for using Open-Channel SSDs, and
-	required for targets to be instantiated.
-
 config NVM_RRPC
 	tristate "Round-robin Hybrid Open-Channel SSD target"
 	---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22cf1a5..b2a39e2d2895 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,5 @@
 # Makefile for Open-Channel SSDs.
 #
 
-obj-$(CONFIG_NVM)		:= core.o sysblk.o
-obj-$(CONFIG_NVM_GENNVM)	+= gennvm.o
+obj-$(CONFIG_NVM)		:= core.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 02240a0b39c9..5262ba66a7a7 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -29,10 +29,483 @@
 
 static LIST_HEAD(nvm_tgt_types);
 static DECLARE_RWSEM(nvm_tgtt_lock);
-static LIST_HEAD(nvm_mgrs);
 static LIST_HEAD(nvm_devices);
 static DECLARE_RWSEM(nvm_lock);
 
+/* Map between virtual and physical channel and lun */
+struct nvm_ch_map {
+	int ch_off;
+	int nr_luns;
+	int *lun_offs;
+};
+
+struct nvm_dev_map {
+	struct nvm_ch_map *chnls;
+	int nr_chnls;
+};
+
+struct nvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
+
+static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
+{
+	struct nvm_target *tgt;
+
+	list_for_each_entry(tgt, &dev->targets, list)
+		if (!strcmp(name, tgt->disk->disk_name))
+			return tgt;
+
+	return NULL;
+}
+
+static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++) {
+		if (test_and_set_bit(i, dev->lun_map)) {
+			pr_err("nvm: lun %d already allocated\n", i);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	while (--i > lun_begin)
+		clear_bit(i, dev->lun_map);
+
+	return -EBUSY;
+}
+
+static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
+				 int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++)
+		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_dev_map *dev_map = tgt_dev->map;
+	int i, j;
+
+	for (i = 0; i < dev_map->nr_chnls; i++) {
+		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs = ch_map->lun_offs;
+		int ch = i + ch_map->ch_off;
+
+		for (j = 0; j < ch_map->nr_luns; j++) {
+			int lun = j + lun_offs[j];
+			int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+		}
+
+		kfree(ch_map->lun_offs);
+	}
+
+	kfree(dev_map->chnls);
+	kfree(dev_map);
+
+	kfree(tgt_dev->luns);
+	kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
+					      int lun_begin, int lun_end)
+{
+	struct nvm_tgt_dev *tgt_dev = NULL;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	struct nvm_dev_map *dev_map;
+	struct ppa_addr *luns;
+	int nr_luns = lun_end - lun_begin + 1;
+	int luns_left = nr_luns;
+	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+	int bch = lun_begin / dev->geo.luns_per_chnl;
+	int blun = lun_begin % dev->geo.luns_per_chnl;
+	int lunid = 0;
+	int lun_balanced = 1;
+	int prev_nr_luns;
+	int i, j;
+
+	nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+	if (!dev_map)
+		goto err_dev;
+
+	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
+								GFP_KERNEL);
+	if (!dev_map->chnls)
+		goto err_chnls;
+
+	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+	if (!luns)
+		goto err_luns;
+
+	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+	for (i = 0; i < nr_chnls; i++) {
+		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+		int *lun_roffs = ch_rmap->lun_offs;
+		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs;
+		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+
+		if (lun_balanced && prev_nr_luns != luns_in_chnl)
+			lun_balanced = 0;
+
+		ch_map->ch_off = ch_rmap->ch_off = bch;
+		ch_map->nr_luns = luns_in_chnl;
+
+		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_offs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++) {
+			luns[lunid].ppa = 0;
+			luns[lunid].g.ch = i;
+			luns[lunid++].g.lun = j;
+
+			lun_offs[j] = blun;
+			lun_roffs[j + blun] = blun;
+		}
+
+		ch_map->lun_offs = lun_offs;
+
+		/* when starting a new channel, lun offset is reset */
+		blun = 0;
+		luns_left -= luns_in_chnl;
+	}
+
+	dev_map->nr_chnls = nr_chnls;
+
+	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+	if (!tgt_dev)
+		goto err_ch;
+
+	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+	/* Target device only owns a portion of the physical device */
+	tgt_dev->geo.nr_chnls = nr_chnls;
+	tgt_dev->geo.nr_luns = nr_luns;
+	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+	tgt_dev->q = dev->q;
+	tgt_dev->map = dev_map;
+	tgt_dev->luns = luns;
+	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+	tgt_dev->parent = dev;
+
+	return tgt_dev;
+err_ch:
+	while (--i > 0)
+		kfree(dev_map->chnls[i].lun_offs);
+	kfree(luns);
+err_luns:
+	kfree(dev_map->chnls);
+err_chnls:
+	kfree(dev_map);
+err_dev:
+	return tgt_dev;
+}
+
+static const struct block_device_operations nvm_fops = {
+	.owner		= THIS_MODULE,
+};
+
+static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
+{
+	struct nvm_ioctl_create_simple *s = &create->conf.s;
+	struct request_queue *tqueue;
+	struct gendisk *tdisk;
+	struct nvm_tgt_type *tt;
+	struct nvm_target *t;
+	struct nvm_tgt_dev *tgt_dev;
+	void *targetdata;
+
+	tt = nvm_find_target_type(create->tgttype, 1);
+	if (!tt) {
+		pr_err("nvm: target type %s not found\n", create->tgttype);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->mlock);
+	t = nvm_find_target(dev, create->tgtname);
+	if (t) {
+		pr_err("nvm: target name already exists.\n");
+		mutex_unlock(&dev->mlock);
+		return -EINVAL;
+	}
+	mutex_unlock(&dev->mlock);
+
+	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
+		return -ENOMEM;
+
+	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+	if (!t)
+		goto err_reserve;
+
+	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+	if (!tgt_dev) {
+		pr_err("nvm: could not create target device\n");
+		goto err_t;
+	}
+
+	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+	if (!tqueue)
+		goto err_dev;
+	blk_queue_make_request(tqueue, tt->make_rq);
+
+	tdisk = alloc_disk(0);
+	if (!tdisk)
+		goto err_queue;
+
+	sprintf(tdisk->disk_name, "%s", create->tgtname);
+	tdisk->flags = GENHD_FL_EXT_DEVT;
+	tdisk->major = 0;
+	tdisk->first_minor = 0;
+	tdisk->fops = &nvm_fops;
+	tdisk->queue = tqueue;
+
+	targetdata = tt->init(tgt_dev, tdisk);
+	if (IS_ERR(targetdata))
+		goto err_init;
+
+	tdisk->private_data = targetdata;
+	tqueue->queuedata = targetdata;
+
+	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+	set_capacity(tdisk, tt->capacity(targetdata));
+	add_disk(tdisk);
+
+	if (tt->sysfs_init && tt->sysfs_init(tdisk))
+		goto err_sysfs;
+
+	t->type = tt;
+	t->disk = tdisk;
+	t->dev = tgt_dev;
+
+	mutex_lock(&dev->mlock);
+	list_add_tail(&t->list, &dev->targets);
+	mutex_unlock(&dev->mlock);
+
+	return 0;
+err_sysfs:
+	if (tt->exit)
+		tt->exit(targetdata);
+err_init:
+	put_disk(tdisk);
+err_queue:
+	blk_cleanup_queue(tqueue);
+err_dev:
+	nvm_remove_tgt_dev(tgt_dev);
+err_t:
+	kfree(t);
+err_reserve:
+	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+	return -ENOMEM;
+}
+
+static void __nvm_remove_target(struct nvm_target *t)
+{
+	struct nvm_tgt_type *tt = t->type;
+	struct gendisk *tdisk = t->disk;
+	struct request_queue *q = tdisk->queue;
+
+	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+
+	if (tt->sysfs_exit)
+		tt->sysfs_exit(tdisk);
+
+	if (tt->exit)
+		tt->exit(tdisk->private_data);
+
+	nvm_remove_tgt_dev(t->dev);
+	put_disk(tdisk);
+
+	list_del(&t->list);
+	kfree(t);
+}
+
+/**
+ * nvm_remove_tgt - Removes a target from the media manager
+ * @dev:	device
+ * @remove:	ioctl structure with target name to remove.
+ *
+ * Returns:
+ * 0: on success
+ * 1: on not found
+ * <0: on error
+ */
+static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+{
+	struct nvm_target *t;
+
+	mutex_lock(&dev->mlock);
+	t = nvm_find_target(dev, remove->tgtname);
+	if (!t) {
+		mutex_unlock(&dev->mlock);
+		return 1;
+	}
+	__nvm_remove_target(t);
+	mutex_unlock(&dev->mlock);
+
+	return 0;
+}
+
+static int nvm_register_map(struct nvm_dev *dev)
+{
+	struct nvm_dev_map *rmap;
+	int i, j;
+
+	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+	if (!rmap)
+		goto err_rmap;
+
+	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
+								GFP_KERNEL);
+	if (!rmap->chnls)
+		goto err_chnls;
+
+	for (i = 0; i < dev->geo.nr_chnls; i++) {
+		struct nvm_ch_map *ch_rmap;
+		int *lun_roffs;
+		int luns_in_chnl = dev->geo.luns_per_chnl;
+
+		ch_rmap = &rmap->chnls[i];
+
+		ch_rmap->ch_off = -1;
+		ch_rmap->nr_luns = luns_in_chnl;
+
+		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_roffs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++)
+			lun_roffs[j] = -1;
+
+		ch_rmap->lun_offs = lun_roffs;
+	}
+
+	dev->rmap = rmap;
+
+	return 0;
+err_ch:
+	while (--i >= 0)
+		kfree(rmap->chnls[i].lun_offs);
+err_chnls:
+	kfree(rmap);
+err_rmap:
+	return -ENOMEM;
+}
+
+static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+	struct nvm_dev_map *dev_map = tgt_dev->map;
+	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+	int lun_off = ch_map->lun_offs[p->g.lun];
+
+	p->g.ch += ch_map->ch_off;
+	p->g.lun += lun_off;
+}
+
+static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+	int lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+	p->g.ch -= ch_rmap->ch_off;
+	p->g.lun -= lun_roff;
+}
+
+static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
+				struct ppa_addr *ppa_list, int nr_ppas)
+{
+	int i;
+
+	for (i = 0; i < nr_ppas; i++) {
+		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
+		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
+	}
+}
+
+static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
+				struct ppa_addr *ppa_list, int nr_ppas)
+{
+	int i;
+
+	for (i = 0; i < nr_ppas; i++) {
+		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
+		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
+	}
+}
+
+static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+	if (rqd->nr_ppas == 1) {
+		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
+		return;
+	}
+
+	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+}
+
+static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+	if (rqd->nr_ppas == 1) {
+		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
+		return;
+	}
+
+	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+}
+
+void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+		     int len)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	u64 i;
+
+	for (i = 0; i < len; i++) {
+		struct nvm_ch_map *ch_rmap;
+		int *lun_roffs;
+		struct ppa_addr gaddr;
+		u64 pba = le64_to_cpu(entries[i]);
+		int off;
+		u64 diff;
+
+		if (!pba)
+			continue;
+
+		gaddr = linear_to_generic_addr(geo, pba);
+		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+		lun_roffs = ch_rmap->lun_offs;
+
+		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
+
+		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
+
+		entries[i] -= cpu_to_le64(diff);
+	}
+}
+EXPORT_SYMBOL(nvm_part_to_tgt);
+
 struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
 {
 	struct nvm_tgt_type *tmp, *tt = NULL;
@@ -92,78 +565,6 @@ void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
 }
 EXPORT_SYMBOL(nvm_dev_dma_free);
 
-static struct nvmm_type *nvm_find_mgr_type(const char *name)
-{
-	struct nvmm_type *mt;
-
-	list_for_each_entry(mt, &nvm_mgrs, list)
-		if (!strcmp(name, mt->name))
-			return mt;
-
-	return NULL;
-}
-
-static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
-{
-	struct nvmm_type *mt;
-	int ret;
-
-	lockdep_assert_held(&nvm_lock);
-
-	list_for_each_entry(mt, &nvm_mgrs, list) {
-		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
-			continue;
-
-		ret = mt->register_mgr(dev);
-		if (ret < 0) {
-			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
-								ret, dev->name);
-			return NULL; /* initialization failed */
-		} else if (ret > 0)
-			return mt;
-	}
-
-	return NULL;
-}
-
-int nvm_register_mgr(struct nvmm_type *mt)
-{
-	struct nvm_dev *dev;
-	int ret = 0;
-
-	down_write(&nvm_lock);
-	if (nvm_find_mgr_type(mt->name)) {
-		ret = -EEXIST;
-		goto finish;
-	} else {
-		list_add(&mt->list, &nvm_mgrs);
-	}
-
-	/* try to register media mgr if any device have none configured */
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		if (dev->mt)
-			continue;
-
-		dev->mt = nvm_init_mgr(dev);
-	}
-finish:
-	up_write(&nvm_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(nvm_register_mgr);
-
-void nvm_unregister_mgr(struct nvmm_type *mt)
-{
-	if (!mt)
-		return;
-
-	down_write(&nvm_lock);
-	list_del(&mt->list);
-	up_write(&nvm_lock);
-}
-EXPORT_SYMBOL(nvm_unregister_mgr);
-
 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 {
 	struct nvm_dev *dev;
@@ -175,53 +576,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 	return NULL;
 }
 
-static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
-					 struct nvm_rq *rqd)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-	int i;
-
-	if (rqd->nr_ppas > 1) {
-		for (i = 0; i < rqd->nr_ppas; i++) {
-			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
-					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-		}
-	} else {
-		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
-						TRANS_TGT_TO_DEV);
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-	}
-}
-
-int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
-								int type)
-{
-	struct nvm_rq rqd;
-	int ret;
-
-	if (nr_ppas > dev->ops->max_phys_sect) {
-		pr_err("nvm: unable to update all sysblocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-	nvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(dev, &rqd);
-	if (ret) {
-		pr_err("nvm: sysblk failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(nvm_set_bb_tbl);
-
 int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 		       int nr_ppas, int type)
 {
@@ -237,12 +591,12 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-	nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
 
 	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 	nvm_free_rqd_ppalist(dev, &rqd);
 	if (ret) {
-		pr_err("nvm: sysblk failed bb mark\n");
+		pr_err("nvm: failed bb mark\n");
 		return -EINVAL;
 	}
 
@@ -262,15 +616,42 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
-	return dev->mt->submit_io(tgt_dev, rqd);
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	nvm_rq_tgt_to_dev(tgt_dev, rqd);
+
+	rqd->dev = tgt_dev;
+	return dev->ops->submit_io(dev, rqd);
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	nvm_map_to_dev(tgt_dev, ppas);
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
+	if (ret)
+		return ret;
+
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+	rqd.flags = flags;
+
+	ret = dev->ops->erase_block(dev, &rqd);
 
-	return dev->mt->erase_blk(tgt_dev, p, flags);
+	nvm_free_rqd_ppalist(dev, &rqd);
+
+	return ret;
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
@@ -289,46 +670,67 @@ EXPORT_SYMBOL(nvm_get_l2p_tbl);
 int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_geo *geo = &dev->geo;
+	struct nvm_area *area, *prev, *next;
+	sector_t begin = 0;
+	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
 
-	return dev->mt->get_area(dev, lba, len);
-}
-EXPORT_SYMBOL(nvm_get_area);
+	if (len > max_sectors)
+		return -EINVAL;
 
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
+	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
 
-	dev->mt->put_area(dev, lba);
-}
-EXPORT_SYMBOL(nvm_put_area);
+	prev = NULL;
 
-void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
+	spin_lock(&dev->lock);
+	list_for_each_entry(next, &dev->area_list, list) {
+		if (begin + len > next->begin) {
+			begin = next->end;
+			prev = next;
+			continue;
+		}
+		break;
+	}
 
-	if (rqd->nr_ppas > 1) {
-		for (i = 0; i < rqd->nr_ppas; i++)
-			rqd->ppa_list[i] = dev_to_generic_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	if ((begin + len) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
 	}
+
+	area->begin = *lba = begin;
+	area->end = begin + len;
+
+	if (prev) /* insert into sorted order */
+		list_add(&area->list, &prev->list);
+	else
+		list_add(&area->list, &dev->area_list);
+	spin_unlock(&dev->lock);
+
+	return 0;
 }
-EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+EXPORT_SYMBOL(nvm_get_area);
 
-void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
 {
-	int i;
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_area *area;
 
-	if (rqd->nr_ppas > 1) {
-		for (i = 0; i < rqd->nr_ppas; i++)
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &dev->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
 	}
+	spin_unlock(&dev->lock);
 }
-EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+EXPORT_SYMBOL(nvm_put_area);
 
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 			const struct ppa_addr *ppas, int nr_ppas, int vblk)
@@ -380,149 +782,19 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
 
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
-								int flags)
+void nvm_end_io(struct nvm_rq *rqd)
 {
-	struct nvm_rq rqd;
-	int ret;
+	struct nvm_tgt_dev *tgt_dev = rqd->dev;
 
-	if (!dev->ops->erase_block)
-		return 0;
+	/* Convert address space */
+	if (tgt_dev)
+		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-	if (ret)
-		return ret;
-
-	nvm_generic_to_addr_mode(dev, &rqd);
-
-	rqd.flags = flags;
-
-	ret = dev->ops->erase_block(dev, &rqd);
-
-	nvm_free_rqd_ppalist(dev, &rqd);
-
-	return ret;
-}
-EXPORT_SYMBOL(nvm_erase_ppa);
-
-void nvm_end_io(struct nvm_rq *rqd, int error)
-{
-	rqd->error = error;
-	rqd->end_io(rqd);
+	if (rqd->end_io)
+		rqd->end_io(rqd);
 }
 EXPORT_SYMBOL(nvm_end_io);
 
-static void nvm_end_io_sync(struct nvm_rq *rqd)
-{
-	struct completion *waiting = rqd->wait;
-
-	rqd->wait = NULL;
-
-	complete(waiting);
-}
-
-static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
-						int flags, void *buf, int len)
-{
-	DECLARE_COMPLETION_ONSTACK(wait);
-	struct bio *bio;
-	int ret;
-	unsigned long hang_check;
-
-	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
-	if (IS_ERR_OR_NULL(bio))
-		return -ENOMEM;
-
-	nvm_generic_to_addr_mode(dev, rqd);
-
-	rqd->dev = NULL;
-	rqd->opcode = opcode;
-	rqd->flags = flags;
-	rqd->bio = bio;
-	rqd->wait = &wait;
-	rqd->end_io = nvm_end_io_sync;
-
-	ret = dev->ops->submit_io(dev, rqd);
-	if (ret) {
-		bio_put(bio);
-		return ret;
-	}
-
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
-	if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-							hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
-
-	return rqd->error;
-}
-
-/**
- * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
- *			 take to free ppa list if necessary.
- * @dev:	device
- * @ppa_list:	user created ppa_list
- * @nr_ppas:	length of ppa_list
- * @opcode:	device opcode
- * @flags:	device flags
- * @buf:	data buffer
- * @len:	data buffer length
- */
-int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
-			int nr_ppas, int opcode, int flags, void *buf, int len)
-{
-	struct nvm_rq rqd;
-
-	if (dev->ops->max_phys_sect < nr_ppas)
-		return -EINVAL;
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	rqd.nr_ppas = nr_ppas;
-	if (nr_ppas > 1)
-		rqd.ppa_list = ppa_list;
-	else
-		rqd.ppa_addr = ppa_list[0];
-
-	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-}
-EXPORT_SYMBOL(nvm_submit_ppa_list);
-
-/**
- * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
- *		    as single, dual, quad plane PPAs depending on device type.
- * @dev:	device
- * @ppa:	user created ppa_list
- * @nr_ppas:	length of ppa_list
- * @opcode:	device opcode
- * @flags:	device flags
- * @buf:	data buffer
- * @len:	data buffer length
- */
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
-				int opcode, int flags, void *buf, int len)
-{
-	struct nvm_rq rqd;
-	int ret;
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
-	if (ret)
-		return ret;
-
-	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-
-	nvm_free_rqd_ppalist(dev, &rqd);
-
-	return ret;
-}
-EXPORT_SYMBOL(nvm_submit_ppa);
-
 /*
  * folds a bad block list from its plane representation to its virtual
  * block representation. The fold is done in place and reduced size is
@@ -559,21 +831,14 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 }
 EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
-{
-	ppa = generic_to_dev_addr(dev, ppa);
-
-	return dev->ops->get_bb_tbl(dev, ppa, blks);
-}
-EXPORT_SYMBOL(nvm_get_bb_tbl);
-
 int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
 		       u8 *blks)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
-	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
-	return nvm_get_bb_tbl(dev, ppa, blks);
+	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
+
+	return dev->ops->get_bb_tbl(dev, ppa, blks);
 }
 EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
 
@@ -627,7 +892,7 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
-	struct nvm_id_group *grp = &id->groups[0];
+	struct nvm_id_group *grp = &id->grp;
 	struct nvm_geo *geo = &dev->geo;
 	int ret;
 
@@ -691,36 +956,31 @@ static int nvm_core_init(struct nvm_dev *dev)
691 goto err_fmtype; 956 goto err_fmtype;
692 } 957 }
693 958
959 INIT_LIST_HEAD(&dev->area_list);
960 INIT_LIST_HEAD(&dev->targets);
694 mutex_init(&dev->mlock); 961 mutex_init(&dev->mlock);
695 spin_lock_init(&dev->lock); 962 spin_lock_init(&dev->lock);
696 963
697 blk_queue_logical_block_size(dev->q, geo->sec_size); 964 ret = nvm_register_map(dev);
965 if (ret)
966 goto err_fmtype;
698 967
968 blk_queue_logical_block_size(dev->q, geo->sec_size);
699 return 0; 969 return 0;
700err_fmtype: 970err_fmtype:
701 kfree(dev->lun_map); 971 kfree(dev->lun_map);
702 return ret; 972 return ret;
703} 973}
704 974
705static void nvm_free_mgr(struct nvm_dev *dev)
706{
707 if (!dev->mt)
708 return;
709
710 dev->mt->unregister_mgr(dev);
711 dev->mt = NULL;
712}
713
714void nvm_free(struct nvm_dev *dev) 975void nvm_free(struct nvm_dev *dev)
715{ 976{
716 if (!dev) 977 if (!dev)
717 return; 978 return;
718 979
719 nvm_free_mgr(dev);
720
721 if (dev->dma_pool) 980 if (dev->dma_pool)
722 dev->ops->destroy_dma_pool(dev->dma_pool); 981 dev->ops->destroy_dma_pool(dev->dma_pool);
723 982
983 kfree(dev->rmap);
724 kfree(dev->lptbl); 984 kfree(dev->lptbl);
725 kfree(dev->lun_map); 985 kfree(dev->lun_map);
726 kfree(dev); 986 kfree(dev);
@@ -731,28 +991,19 @@ static int nvm_init(struct nvm_dev *dev)
731 struct nvm_geo *geo = &dev->geo; 991 struct nvm_geo *geo = &dev->geo;
732 int ret = -EINVAL; 992 int ret = -EINVAL;
733 993
734 if (!dev->q || !dev->ops)
735 return ret;
736
737 if (dev->ops->identity(dev, &dev->identity)) { 994 if (dev->ops->identity(dev, &dev->identity)) {
738 pr_err("nvm: device could not be identified\n"); 995 pr_err("nvm: device could not be identified\n");
739 goto err; 996 goto err;
740 } 997 }
741 998
742 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n", 999 pr_debug("nvm: ver:%x nvm_vendor:%x\n",
743 dev->identity.ver_id, dev->identity.vmnt, 1000 dev->identity.ver_id, dev->identity.vmnt);
744 dev->identity.cgrps);
745 1001
746 if (dev->identity.ver_id != 1) { 1002 if (dev->identity.ver_id != 1) {
747 pr_err("nvm: device not supported by kernel."); 1003 pr_err("nvm: device not supported by kernel.");
748 goto err; 1004 goto err;
749 } 1005 }
750 1006
751 if (dev->identity.cgrps != 1) {
752 pr_err("nvm: only one group configuration supported.");
753 goto err;
754 }
755
756 ret = nvm_core_init(dev); 1007 ret = nvm_core_init(dev);
757 if (ret) { 1008 if (ret) {
758 pr_err("nvm: could not initialize core structures.\n"); 1009 pr_err("nvm: could not initialize core structures.\n");
@@ -779,49 +1030,50 @@ int nvm_register(struct nvm_dev *dev)
779{ 1030{
780 int ret; 1031 int ret;
781 1032
782 ret = nvm_init(dev); 1033 if (!dev->q || !dev->ops)
783 if (ret) 1034 return -EINVAL;
784 goto err_init;
785 1035
786 if (dev->ops->max_phys_sect > 256) { 1036 if (dev->ops->max_phys_sect > 256) {
787 pr_info("nvm: max sectors supported is 256.\n"); 1037 pr_info("nvm: max sectors supported is 256.\n");
788 ret = -EINVAL; 1038 return -EINVAL;
789 goto err_init;
790 } 1039 }
791 1040
792 if (dev->ops->max_phys_sect > 1) { 1041 if (dev->ops->max_phys_sect > 1) {
793 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist"); 1042 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
794 if (!dev->dma_pool) { 1043 if (!dev->dma_pool) {
795 pr_err("nvm: could not create dma pool\n"); 1044 pr_err("nvm: could not create dma pool\n");
796 ret = -ENOMEM; 1045 return -ENOMEM;
797 goto err_init;
798 } 1046 }
799 } 1047 }
800 1048
801 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { 1049 ret = nvm_init(dev);
802 ret = nvm_get_sysblock(dev, &dev->sb); 1050 if (ret)
803 if (!ret) 1051 goto err_init;
804 pr_err("nvm: device not initialized.\n");
805 else if (ret < 0)
806 pr_err("nvm: err (%d) on device initialization\n", ret);
807 }
808 1052
809 /* register device with a supported media manager */ 1053 /* register device with a supported media manager */
810 down_write(&nvm_lock); 1054 down_write(&nvm_lock);
811 if (ret > 0)
812 dev->mt = nvm_init_mgr(dev);
813 list_add(&dev->devices, &nvm_devices); 1055 list_add(&dev->devices, &nvm_devices);
814 up_write(&nvm_lock); 1056 up_write(&nvm_lock);
815 1057
816 return 0; 1058 return 0;
817err_init: 1059err_init:
818 kfree(dev->lun_map); 1060 dev->ops->destroy_dma_pool(dev->dma_pool);
819 return ret; 1061 return ret;
820} 1062}
821EXPORT_SYMBOL(nvm_register); 1063EXPORT_SYMBOL(nvm_register);
822 1064
823void nvm_unregister(struct nvm_dev *dev) 1065void nvm_unregister(struct nvm_dev *dev)
824{ 1066{
1067 struct nvm_target *t, *tmp;
1068
1069 mutex_lock(&dev->mlock);
1070 list_for_each_entry_safe(t, tmp, &dev->targets, list) {
1071 if (t->dev->parent != dev)
1072 continue;
1073 __nvm_remove_target(t);
1074 }
1075 mutex_unlock(&dev->mlock);
1076
825 down_write(&nvm_lock); 1077 down_write(&nvm_lock);
826 list_del(&dev->devices); 1078 list_del(&dev->devices);
827 up_write(&nvm_lock); 1079 up_write(&nvm_lock);
@@ -844,24 +1096,24 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
844 return -EINVAL; 1096 return -EINVAL;
845 } 1097 }
846 1098
847 if (!dev->mt) {
848 pr_info("nvm: device has no media manager registered.\n");
849 return -ENODEV;
850 }
851
852 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) { 1099 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
853 pr_err("nvm: config type not valid\n"); 1100 pr_err("nvm: config type not valid\n");
854 return -EINVAL; 1101 return -EINVAL;
855 } 1102 }
856 s = &create->conf.s; 1103 s = &create->conf.s;
857 1104
858 if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) { 1105 if (s->lun_begin == -1 && s->lun_end == -1) {
1106 s->lun_begin = 0;
1107 s->lun_end = dev->geo.nr_luns - 1;
1108 }
1109
1110 if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
859 pr_err("nvm: lun out of bound (%u:%u > %u)\n", 1111 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
860 s->lun_begin, s->lun_end, dev->geo.nr_luns); 1112 s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
861 return -EINVAL; 1113 return -EINVAL;
862 } 1114 }
863 1115
864 return dev->mt->create_tgt(dev, create); 1116 return nvm_create_tgt(dev, create);
865} 1117}
866 1118
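The new -1/-1 convention lets userspace claim the whole device without querying its geometry first; a hypothetical userspace sketch, with field names as in uapi/linux/lightnvm.h:

struct nvm_ioctl_create create = {
	.dev = "nvme0n1",
	.tgttype = "rrpc",
	.tgtname = "mydev",
	.conf.type = NVM_CONFIG_TYPE_SIMPLE,
	.conf.s.lun_begin = -1,	/* expanded to 0 by the kernel */
	.conf.s.lun_end = -1,	/* expanded to nr_luns - 1 */
};

if (ioctl(fd, NVM_DEV_CREATE, &create) < 0)
	perror("NVM_DEV_CREATE");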
867static long nvm_ioctl_info(struct file *file, void __user *arg) 1119static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -923,16 +1175,14 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
923 struct nvm_ioctl_device_info *info = &devices->info[i]; 1175 struct nvm_ioctl_device_info *info = &devices->info[i];
924 1176
925 sprintf(info->devname, "%s", dev->name); 1177 sprintf(info->devname, "%s", dev->name);
926 if (dev->mt) {
927 info->bmversion[0] = dev->mt->version[0];
928 info->bmversion[1] = dev->mt->version[1];
929 info->bmversion[2] = dev->mt->version[2];
930 sprintf(info->bmname, "%s", dev->mt->name);
931 } else {
932 sprintf(info->bmname, "none");
933 }
934 1178
1179 /* kept for compatibility */
1180 info->bmversion[0] = 1;
1181 info->bmversion[1] = 0;
1182 info->bmversion[2] = 0;
1183 sprintf(info->bmname, "%s", "gennvm");
935 i++; 1184 i++;
1185
936 if (i > 31) { 1186 if (i > 31) {
937 pr_err("nvm: max 31 devices can be reported.\n"); 1187 pr_err("nvm: max 31 devices can be reported.\n");
938 break; 1188 break;
@@ -994,7 +1244,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
994 } 1244 }
995 1245
996 list_for_each_entry(dev, &nvm_devices, devices) { 1246 list_for_each_entry(dev, &nvm_devices, devices) {
997 ret = dev->mt->remove_tgt(dev, &remove); 1247 ret = nvm_remove_tgt(dev, &remove);
998 if (!ret) 1248 if (!ret)
999 break; 1249 break;
1000 } 1250 }
@@ -1002,47 +1252,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1002 return ret; 1252 return ret;
1003} 1253}
1004 1254
1005static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info) 1255/* kept for compatibility reasons */
1006{
1007 info->seqnr = 1;
1008 info->erase_cnt = 0;
1009 info->version = 1;
1010}
1011
1012static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1013{
1014 struct nvm_dev *dev;
1015 struct nvm_sb_info info;
1016 int ret;
1017
1018 down_write(&nvm_lock);
1019 dev = nvm_find_nvm_dev(init->dev);
1020 up_write(&nvm_lock);
1021 if (!dev) {
1022 pr_err("nvm: device not found\n");
1023 return -EINVAL;
1024 }
1025
1026 nvm_setup_nvm_sb_info(&info);
1027
1028 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1029 info.fs_ppa.ppa = -1;
1030
1031 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1032 ret = nvm_init_sysblock(dev, &info);
1033 if (ret)
1034 return ret;
1035 }
1036
1037 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1038
1039 down_write(&nvm_lock);
1040 dev->mt = nvm_init_mgr(dev);
1041 up_write(&nvm_lock);
1042
1043 return 0;
1044}
1045
1046static long nvm_ioctl_dev_init(struct file *file, void __user *arg) 1256static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1047{ 1257{
1048 struct nvm_ioctl_dev_init init; 1258 struct nvm_ioctl_dev_init init;
@@ -1058,15 +1268,13 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1058 return -EINVAL; 1268 return -EINVAL;
1059 } 1269 }
1060 1270
1061 init.dev[DISK_NAME_LEN - 1] = '\0'; 1271 return 0;
1062
1063 return __nvm_ioctl_dev_init(&init);
1064} 1272}
1065 1273
1274/* Kept for compatibility reasons */
1066static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) 1275static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1067{ 1276{
1068 struct nvm_ioctl_dev_factory fact; 1277 struct nvm_ioctl_dev_factory fact;
1069 struct nvm_dev *dev;
1070 1278
1071 if (!capable(CAP_SYS_ADMIN)) 1279 if (!capable(CAP_SYS_ADMIN))
1072 return -EPERM; 1280 return -EPERM;
@@ -1079,19 +1287,6 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1079 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1)) 1287 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1080 return -EINVAL; 1288 return -EINVAL;
1081 1289
1082 down_write(&nvm_lock);
1083 dev = nvm_find_nvm_dev(fact.dev);
1084 up_write(&nvm_lock);
1085 if (!dev) {
1086 pr_err("nvm: device not found\n");
1087 return -EINVAL;
1088 }
1089
1090 nvm_free_mgr(dev);
1091
1092 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1093 return nvm_dev_factory(dev, fact.flags);
1094
1095 return 0; 1290 return 0;
1096} 1291}
1097 1292
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
deleted file mode 100644
index ca7880082d80..000000000000
--- a/drivers/lightnvm/gennvm.c
+++ /dev/null
@@ -1,657 +0,0 @@
1/*
2 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA.
17 *
18 * Implementation of a general nvm manager for Open-Channel SSDs.
19 */
20
21#include "gennvm.h"
22
23static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
24{
25 struct nvm_target *tgt;
26
27 list_for_each_entry(tgt, &gn->targets, list)
28 if (!strcmp(name, tgt->disk->disk_name))
29 return tgt;
30
31 return NULL;
32}
33
34static const struct block_device_operations gen_fops = {
35 .owner = THIS_MODULE,
36};
37
38static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
39 int lun_begin, int lun_end)
40{
41 int i;
42
43 for (i = lun_begin; i <= lun_end; i++) {
44 if (test_and_set_bit(i, dev->lun_map)) {
45 pr_err("nvm: lun %d already allocated\n", i);
46 goto err;
47 }
48 }
49
50 return 0;
51
52err:
53 while (--i > lun_begin)
54 clear_bit(i, dev->lun_map);
55
56 return -EBUSY;
57}
58
59static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
60 int lun_end)
61{
62 int i;
63
64 for (i = lun_begin; i <= lun_end; i++)
65 WARN_ON(!test_and_clear_bit(i, dev->lun_map));
66}
67
68static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
69{
70 struct nvm_dev *dev = tgt_dev->parent;
71 struct gen_dev_map *dev_map = tgt_dev->map;
72 int i, j;
73
74 for (i = 0; i < dev_map->nr_chnls; i++) {
75 struct gen_ch_map *ch_map = &dev_map->chnls[i];
76 int *lun_offs = ch_map->lun_offs;
77 int ch = i + ch_map->ch_off;
78
79 for (j = 0; j < ch_map->nr_luns; j++) {
80 int lun = j + lun_offs[j];
81 int lunid = (ch * dev->geo.luns_per_chnl) + lun;
82
83 WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
84 }
85
86 kfree(ch_map->lun_offs);
87 }
88
89 kfree(dev_map->chnls);
90 kfree(dev_map);
91 kfree(tgt_dev->luns);
92 kfree(tgt_dev);
93}
94
95static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
96 int lun_begin, int lun_end)
97{
98 struct nvm_tgt_dev *tgt_dev = NULL;
99 struct gen_dev_map *dev_rmap = dev->rmap;
100 struct gen_dev_map *dev_map;
101 struct ppa_addr *luns;
102 int nr_luns = lun_end - lun_begin + 1;
103 int luns_left = nr_luns;
104 int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
105 int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
106 int bch = lun_begin / dev->geo.luns_per_chnl;
107 int blun = lun_begin % dev->geo.luns_per_chnl;
108 int lunid = 0;
109 int lun_balanced = 1;
110 int prev_nr_luns;
111 int i, j;
112
113 nr_chnls = nr_luns / dev->geo.luns_per_chnl;
114 nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
115
116 dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
117 if (!dev_map)
118 goto err_dev;
119
120 dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
121 GFP_KERNEL);
122 if (!dev_map->chnls)
123 goto err_chnls;
124
125 luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
126 if (!luns)
127 goto err_luns;
128
129 prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
130 dev->geo.luns_per_chnl : luns_left;
131 for (i = 0; i < nr_chnls; i++) {
132 struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
133 int *lun_roffs = ch_rmap->lun_offs;
134 struct gen_ch_map *ch_map = &dev_map->chnls[i];
135 int *lun_offs;
136 int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
137 dev->geo.luns_per_chnl : luns_left;
138
139 if (lun_balanced && prev_nr_luns != luns_in_chnl)
140 lun_balanced = 0;
141
142 ch_map->ch_off = ch_rmap->ch_off = bch;
143 ch_map->nr_luns = luns_in_chnl;
144
145 lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
146 if (!lun_offs)
147 goto err_ch;
148
149 for (j = 0; j < luns_in_chnl; j++) {
150 luns[lunid].ppa = 0;
151 luns[lunid].g.ch = i;
152 luns[lunid++].g.lun = j;
153
154 lun_offs[j] = blun;
155 lun_roffs[j + blun] = blun;
156 }
157
158 ch_map->lun_offs = lun_offs;
159
160 /* when starting a new channel, lun offset is reset */
161 blun = 0;
162 luns_left -= luns_in_chnl;
163 }
164
165 dev_map->nr_chnls = nr_chnls;
166
167 tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
168 if (!tgt_dev)
169 goto err_ch;
170
171 memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
172 /* Target device only owns a portion of the physical device */
173 tgt_dev->geo.nr_chnls = nr_chnls;
174 tgt_dev->geo.nr_luns = nr_luns;
175 tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
176 tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
177 tgt_dev->q = dev->q;
178 tgt_dev->map = dev_map;
179 tgt_dev->luns = luns;
180 memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
181
182 tgt_dev->parent = dev;
183
184 return tgt_dev;
185err_ch:
186 while (--i > 0)
187 kfree(dev_map->chnls[i].lun_offs);
188 kfree(luns);
189err_luns:
190 kfree(dev_map->chnls);
191err_chnls:
192 kfree(dev_map);
193err_dev:
194 return tgt_dev;
195}
196
197static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
198{
199 struct gen_dev *gn = dev->mp;
200 struct nvm_ioctl_create_simple *s = &create->conf.s;
201 struct request_queue *tqueue;
202 struct gendisk *tdisk;
203 struct nvm_tgt_type *tt;
204 struct nvm_target *t;
205 struct nvm_tgt_dev *tgt_dev;
206 void *targetdata;
207
208 tt = nvm_find_target_type(create->tgttype, 1);
209 if (!tt) {
210 pr_err("nvm: target type %s not found\n", create->tgttype);
211 return -EINVAL;
212 }
213
214 mutex_lock(&gn->lock);
215 t = gen_find_target(gn, create->tgtname);
216 if (t) {
217 pr_err("nvm: target name already exists.\n");
218 mutex_unlock(&gn->lock);
219 return -EINVAL;
220 }
221 mutex_unlock(&gn->lock);
222
223 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
224 if (!t)
225 return -ENOMEM;
226
227 if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
228 goto err_t;
229
230 tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
231 if (!tgt_dev) {
232 pr_err("nvm: could not create target device\n");
233 goto err_reserve;
234 }
235
236 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
237 if (!tqueue)
238 goto err_dev;
239 blk_queue_make_request(tqueue, tt->make_rq);
240
241 tdisk = alloc_disk(0);
242 if (!tdisk)
243 goto err_queue;
244
245 sprintf(tdisk->disk_name, "%s", create->tgtname);
246 tdisk->flags = GENHD_FL_EXT_DEVT;
247 tdisk->major = 0;
248 tdisk->first_minor = 0;
249 tdisk->fops = &gen_fops;
250 tdisk->queue = tqueue;
251
252 targetdata = tt->init(tgt_dev, tdisk);
253 if (IS_ERR(targetdata))
254 goto err_init;
255
256 tdisk->private_data = targetdata;
257 tqueue->queuedata = targetdata;
258
259 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
260
261 set_capacity(tdisk, tt->capacity(targetdata));
262 add_disk(tdisk);
263
264 t->type = tt;
265 t->disk = tdisk;
266 t->dev = tgt_dev;
267
268 mutex_lock(&gn->lock);
269 list_add_tail(&t->list, &gn->targets);
270 mutex_unlock(&gn->lock);
271
272 return 0;
273err_init:
274 put_disk(tdisk);
275err_queue:
276 blk_cleanup_queue(tqueue);
277err_dev:
278 kfree(tgt_dev);
279err_reserve:
280 gen_release_luns_err(dev, s->lun_begin, s->lun_end);
281err_t:
282 kfree(t);
283 return -ENOMEM;
284}
285
286static void __gen_remove_target(struct nvm_target *t)
287{
288 struct nvm_tgt_type *tt = t->type;
289 struct gendisk *tdisk = t->disk;
290 struct request_queue *q = tdisk->queue;
291
292 del_gendisk(tdisk);
293 blk_cleanup_queue(q);
294
295 if (tt->exit)
296 tt->exit(tdisk->private_data);
297
298 gen_remove_tgt_dev(t->dev);
299 put_disk(tdisk);
300
301 list_del(&t->list);
302 kfree(t);
303}
304
305/**
306 * gen_remove_tgt - Removes a target from the media manager
307 * @dev: device
308 * @remove: ioctl structure with target name to remove.
309 *
310 * Returns:
311 * 0: on success
 312 * 1: if not found
313 * <0: on error
314 */
315static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
316{
317 struct gen_dev *gn = dev->mp;
318 struct nvm_target *t;
319
320 if (!gn)
321 return 1;
322
323 mutex_lock(&gn->lock);
324 t = gen_find_target(gn, remove->tgtname);
325 if (!t) {
326 mutex_unlock(&gn->lock);
327 return 1;
328 }
329 __gen_remove_target(t);
330 mutex_unlock(&gn->lock);
331
332 return 0;
333}
334
335static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
336{
337 struct nvm_geo *geo = &dev->geo;
338 struct gen_dev *gn = dev->mp;
339 struct gen_area *area, *prev, *next;
340 sector_t begin = 0;
341 sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
342
343 if (len > max_sectors)
344 return -EINVAL;
345
346 area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
347 if (!area)
348 return -ENOMEM;
349
350 prev = NULL;
351
352 spin_lock(&dev->lock);
353 list_for_each_entry(next, &gn->area_list, list) {
354 if (begin + len > next->begin) {
355 begin = next->end;
356 prev = next;
357 continue;
358 }
359 break;
360 }
361
362 if ((begin + len) > max_sectors) {
363 spin_unlock(&dev->lock);
364 kfree(area);
365 return -EINVAL;
366 }
367
368 area->begin = *lba = begin;
369 area->end = begin + len;
370
371 if (prev) /* insert into sorted order */
372 list_add(&area->list, &prev->list);
373 else
374 list_add(&area->list, &gn->area_list);
375 spin_unlock(&dev->lock);
376
377 return 0;
378}
379
380static void gen_put_area(struct nvm_dev *dev, sector_t begin)
381{
382 struct gen_dev *gn = dev->mp;
383 struct gen_area *area;
384
385 spin_lock(&dev->lock);
386 list_for_each_entry(area, &gn->area_list, list) {
387 if (area->begin != begin)
388 continue;
389
390 list_del(&area->list);
391 spin_unlock(&dev->lock);
392 kfree(area);
393 return;
394 }
395 spin_unlock(&dev->lock);
396}
397
398static void gen_free(struct nvm_dev *dev)
399{
400 kfree(dev->mp);
401 kfree(dev->rmap);
402 dev->mp = NULL;
403}
404
405static int gen_register(struct nvm_dev *dev)
406{
407 struct gen_dev *gn;
408 struct gen_dev_map *dev_rmap;
409 int i, j;
410
411 if (!try_module_get(THIS_MODULE))
412 return -ENODEV;
413
414 gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
415 if (!gn)
416 goto err_gn;
417
418 dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
419 if (!dev_rmap)
420 goto err_rmap;
421
422 dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
423 GFP_KERNEL);
424 if (!dev_rmap->chnls)
425 goto err_chnls;
426
427 for (i = 0; i < dev->geo.nr_chnls; i++) {
428 struct gen_ch_map *ch_rmap;
429 int *lun_roffs;
430 int luns_in_chnl = dev->geo.luns_per_chnl;
431
432 ch_rmap = &dev_rmap->chnls[i];
433
434 ch_rmap->ch_off = -1;
435 ch_rmap->nr_luns = luns_in_chnl;
436
437 lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
438 if (!lun_roffs)
439 goto err_ch;
440
441 for (j = 0; j < luns_in_chnl; j++)
442 lun_roffs[j] = -1;
443
444 ch_rmap->lun_offs = lun_roffs;
445 }
446
447 gn->dev = dev;
448 gn->nr_luns = dev->geo.nr_luns;
449 INIT_LIST_HEAD(&gn->area_list);
450 mutex_init(&gn->lock);
451 INIT_LIST_HEAD(&gn->targets);
452 dev->mp = gn;
453 dev->rmap = dev_rmap;
454
455 return 1;
456err_ch:
457 while (--i >= 0)
458 kfree(dev_rmap->chnls[i].lun_offs);
459err_chnls:
460 kfree(dev_rmap);
461err_rmap:
462 gen_free(dev);
463err_gn:
464 module_put(THIS_MODULE);
465 return -ENOMEM;
466}
467
468static void gen_unregister(struct nvm_dev *dev)
469{
470 struct gen_dev *gn = dev->mp;
471 struct nvm_target *t, *tmp;
472
473 mutex_lock(&gn->lock);
474 list_for_each_entry_safe(t, tmp, &gn->targets, list) {
475 if (t->dev->parent != dev)
476 continue;
477 __gen_remove_target(t);
478 }
479 mutex_unlock(&gn->lock);
480
481 gen_free(dev);
482 module_put(THIS_MODULE);
483}
484
485static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
486{
487 struct gen_dev_map *dev_map = tgt_dev->map;
488 struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
489 int lun_off = ch_map->lun_offs[p->g.lun];
490 struct nvm_dev *dev = tgt_dev->parent;
491 struct gen_dev_map *dev_rmap = dev->rmap;
492 struct gen_ch_map *ch_rmap;
493 int lun_roff;
494
495 p->g.ch += ch_map->ch_off;
496 p->g.lun += lun_off;
497
498 ch_rmap = &dev_rmap->chnls[p->g.ch];
499 lun_roff = ch_rmap->lun_offs[p->g.lun];
500
501 if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
502 pr_err("nvm: corrupted device partition table\n");
503 return -EINVAL;
504 }
505
506 return 0;
507}
508
509static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
510{
511 struct nvm_dev *dev = tgt_dev->parent;
512 struct gen_dev_map *dev_rmap = dev->rmap;
513 struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
514 int lun_roff = ch_rmap->lun_offs[p->g.lun];
515
516 p->g.ch -= ch_rmap->ch_off;
517 p->g.lun -= lun_roff;
518
519 return 0;
520}
521
522static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
523 int flag)
524{
525 gen_trans_fn *f;
526 int i;
527 int ret = 0;
528
529 f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
530
531 if (rqd->nr_ppas == 1)
532 return f(tgt_dev, &rqd->ppa_addr);
533
534 for (i = 0; i < rqd->nr_ppas; i++) {
535 ret = f(tgt_dev, &rqd->ppa_list[i]);
536 if (ret)
537 goto out;
538 }
539
540out:
541 return ret;
542}
543
544static void gen_end_io(struct nvm_rq *rqd)
545{
546 struct nvm_tgt_dev *tgt_dev = rqd->dev;
547 struct nvm_tgt_instance *ins = rqd->ins;
548
549 /* Convert address space */
550 if (tgt_dev)
551 gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
552
553 ins->tt->end_io(rqd);
554}
555
556static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
557{
558 struct nvm_dev *dev = tgt_dev->parent;
559
560 if (!dev->ops->submit_io)
561 return -ENODEV;
562
563 /* Convert address space */
564 gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
565 nvm_generic_to_addr_mode(dev, rqd);
566
567 rqd->dev = tgt_dev;
568 rqd->end_io = gen_end_io;
569 return dev->ops->submit_io(dev, rqd);
570}
571
572static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
573 int flags)
574{
575 /* Convert address space */
576 gen_map_to_dev(tgt_dev, p);
577
578 return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
579}
580
581static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
582 struct ppa_addr p, int direction)
583{
584 gen_trans_fn *f;
585 struct ppa_addr ppa = p;
586
587 f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
588 f(tgt_dev, &ppa);
589
590 return ppa;
591}
592
593static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
594 int len)
595{
596 struct nvm_geo *geo = &dev->geo;
597 struct gen_dev_map *dev_rmap = dev->rmap;
598 u64 i;
599
600 for (i = 0; i < len; i++) {
601 struct gen_ch_map *ch_rmap;
602 int *lun_roffs;
603 struct ppa_addr gaddr;
604 u64 pba = le64_to_cpu(entries[i]);
605 int off;
606 u64 diff;
607
608 if (!pba)
609 continue;
610
611 gaddr = linear_to_generic_addr(geo, pba);
612 ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
613 lun_roffs = ch_rmap->lun_offs;
614
615 off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
616
617 diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
618 (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
619
620 entries[i] -= cpu_to_le64(diff);
621 }
622}
623
624static struct nvmm_type gen = {
625 .name = "gennvm",
626 .version = {0, 1, 0},
627
628 .register_mgr = gen_register,
629 .unregister_mgr = gen_unregister,
630
631 .create_tgt = gen_create_tgt,
632 .remove_tgt = gen_remove_tgt,
633
634 .submit_io = gen_submit_io,
635 .erase_blk = gen_erase_blk,
636
637 .get_area = gen_get_area,
638 .put_area = gen_put_area,
639
640 .trans_ppa = gen_trans_ppa,
641 .part_to_tgt = gen_part_to_tgt,
642};
643
644static int __init gen_module_init(void)
645{
646 return nvm_register_mgr(&gen);
647}
648
649static void gen_module_exit(void)
650{
651 nvm_unregister_mgr(&gen);
652}
653
654module_init(gen_module_init);
655module_exit(gen_module_exit);
656MODULE_LICENSE("GPL v2");
657MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
deleted file mode 100644
index 6a4b3f368848..000000000000
--- a/drivers/lightnvm/gennvm.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright: Matias Bjorling <mb@bjorling.me>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 */
14
15#ifndef GENNVM_H_
16#define GENNVM_H_
17
18#include <linux/module.h>
19#include <linux/vmalloc.h>
20
21#include <linux/lightnvm.h>
22
23struct gen_dev {
24 struct nvm_dev *dev;
25
26 int nr_luns;
27 struct list_head area_list;
28
29 struct mutex lock;
30 struct list_head targets;
31};
32
33/* Map between virtual and physical channel and lun */
34struct gen_ch_map {
35 int ch_off;
36 int nr_luns;
37 int *lun_offs;
38};
39
40struct gen_dev_map {
41 struct gen_ch_map *chnls;
42 int nr_chnls;
43};
44
45struct gen_area {
46 struct list_head list;
47 sector_t begin;
48 sector_t end; /* end is excluded */
49};
50
51static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
52{
53 return ch_map + 1;
54}
55
56typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
57
58#define gen_for_each_lun(bm, lun, i) \
59 for ((i) = 0, lun = &(bm)->luns[0]; \
60 (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
61
62#endif /* GENNVM_H_ */
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 9fb7de395915..e00b1d7b976f 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -779,7 +779,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
779 779
780static void rrpc_end_io(struct nvm_rq *rqd) 780static void rrpc_end_io(struct nvm_rq *rqd)
781{ 781{
782 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); 782 struct rrpc *rrpc = rqd->private;
783 struct nvm_tgt_dev *dev = rrpc->dev; 783 struct nvm_tgt_dev *dev = rrpc->dev;
784 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); 784 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
785 uint8_t npages = rqd->nr_ppas; 785 uint8_t npages = rqd->nr_ppas;
@@ -972,8 +972,9 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
972 972
973 bio_get(bio); 973 bio_get(bio);
974 rqd->bio = bio; 974 rqd->bio = bio;
975 rqd->ins = &rrpc->instance; 975 rqd->private = rrpc;
976 rqd->nr_ppas = nr_pages; 976 rqd->nr_ppas = nr_pages;
977 rqd->end_io = rrpc_end_io;
977 rrq->flags = flags; 978 rrq->flags = flags;
978 979
979 err = nvm_submit_io(dev, rqd); 980 err = nvm_submit_io(dev, rqd);
@@ -1532,7 +1533,6 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
1532 if (!rrpc) 1533 if (!rrpc)
1533 return ERR_PTR(-ENOMEM); 1534 return ERR_PTR(-ENOMEM);
1534 1535
1535 rrpc->instance.tt = &tt_rrpc;
1536 rrpc->dev = dev; 1536 rrpc->dev = dev;
1537 rrpc->disk = tdisk; 1537 rrpc->disk = tdisk;
1538 1538
@@ -1611,7 +1611,6 @@ static struct nvm_tgt_type tt_rrpc = {
1611 1611
1612 .make_rq = rrpc_make_rq, 1612 .make_rq = rrpc_make_rq,
1613 .capacity = rrpc_capacity, 1613 .capacity = rrpc_capacity,
1614 .end_io = rrpc_end_io,
1615 1614
1616 .init = rrpc_init, 1615 .init = rrpc_init,
1617 .exit = rrpc_exit, 1616 .exit = rrpc_exit,
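Taken together, the rrpc hunks above show the interface change: completion state now rides in the request itself (rqd->private plus a per-request rqd->end_io) instead of requiring a struct nvm_tgt_instance pinned at the top of the target structure. A sketch of the new pattern for a hypothetical target named mytgt, where t->dev is the target's nvm_tgt_dev:

static void mytgt_end_io(struct nvm_rq *rqd)
{
	struct mytgt *t = rqd->private;	/* stashed at submit time */

	mytgt_complete_rq(t, rqd);
}

static int mytgt_submit(struct mytgt *t, struct nvm_rq *rqd)
{
	rqd->private = t;
	rqd->end_io = mytgt_end_io;	/* no tt->end_io hook needed */

	return nvm_submit_io(t->dev, rqd);
}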
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 94e4d73116b2..fdb6ff902903 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -102,9 +102,6 @@ struct rrpc_lun {
102}; 102};
103 103
104struct rrpc { 104struct rrpc {
105 /* instance must be kept in top to resolve rrpc in unprep */
106 struct nvm_tgt_instance instance;
107
108 struct nvm_tgt_dev *dev; 105 struct nvm_tgt_dev *dev;
109 struct gendisk *disk; 106 struct gendisk *disk;
110 107
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
deleted file mode 100644
index 12002bf4efc2..000000000000
--- a/drivers/lightnvm/sysblk.c
+++ /dev/null
@@ -1,733 +0,0 @@
1/*
2 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA.
17 *
18 */
19
20#include <linux/lightnvm.h>
21
22#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
23#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
24 * enables ~1.5M updates per sysblk unit
25 */
26
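As a sanity check on the figure in the comment: 2 blocks per row, 256 pages per block and 3000 erase cycles give 2 * 256 * 3000 = 1,536,000, i.e. roughly 1.5M sysblk updates per row, assuming one update per page write.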
27struct sysblk_scan {
28 /* A row is a collection of flash blocks for a system block. */
29 int nr_rows;
30 int row;
31 int act_blk[MAX_SYSBLKS];
32
33 int nr_ppas;
34 struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
35};
36
37static inline int scan_ppa_idx(int row, int blkid)
38{
39 return (row * MAX_BLKS_PR_SYSBLK) + blkid;
40}
41
42static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
43 struct nvm_system_block *sb)
44{
45 info->seqnr = be32_to_cpu(sb->seqnr);
46 info->erase_cnt = be32_to_cpu(sb->erase_cnt);
47 info->version = be16_to_cpu(sb->version);
48 strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
49 info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
50}
51
52static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
53 struct nvm_sb_info *info)
54{
55 sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
56 sb->seqnr = cpu_to_be32(info->seqnr);
57 sb->erase_cnt = cpu_to_be32(info->erase_cnt);
58 sb->version = cpu_to_be16(info->version);
59 strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
60 sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
61}
62
63static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
64{
65 struct nvm_geo *geo = &dev->geo;
66 int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
67 int i;
68
69 for (i = 0; i < nr_rows; i++)
70 sysblk_ppas[i].ppa = 0;
71
72 /* if possible, place sysblk at first channel, middle channel and last
73 * channel of the device. If not, create only one or two sys blocks
74 */
75 switch (geo->nr_chnls) {
76 case 2:
77 sysblk_ppas[1].g.ch = 1;
78 /* fall-through */
79 case 1:
80 sysblk_ppas[0].g.ch = 0;
81 break;
82 default:
83 sysblk_ppas[0].g.ch = 0;
84 sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
85 sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
86 break;
87 }
88
89 return nr_rows;
90}
91
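For example, on an 8-channel device the default case places the three rows on channels 0, 8 / 2 = 4 and 8 - 1 = 7, spreading the system block copies across the device.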
92static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
93 struct ppa_addr *sysblk_ppas)
94{
95 memset(s, 0, sizeof(struct sysblk_scan));
96 s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
97}
98
99static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
100 u8 *blks, int nr_blks,
101 struct sysblk_scan *s)
102{
103 struct ppa_addr *sppa;
104 int i, blkid = 0;
105
106 nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
107 if (nr_blks < 0)
108 return nr_blks;
109
110 for (i = 0; i < nr_blks; i++) {
111 if (blks[i] == NVM_BLK_T_HOST)
112 return -EEXIST;
113
114 if (blks[i] != NVM_BLK_T_FREE)
115 continue;
116
117 sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
118 sppa->g.ch = ppa.g.ch;
119 sppa->g.lun = ppa.g.lun;
120 sppa->g.blk = i;
121 s->nr_ppas++;
122 blkid++;
123
124 pr_debug("nvm: use (%u %u %u) as sysblk\n",
125 sppa->g.ch, sppa->g.lun, sppa->g.blk);
126 if (blkid > MAX_BLKS_PR_SYSBLK - 1)
127 return 0;
128 }
129
 130		pr_err("nvm: sysblk failed to get a free sysblk\n");
131 return -EINVAL;
132}
133
134static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
135 u8 *blks, int nr_blks,
136 struct sysblk_scan *s)
137{
138 int i, nr_sysblk = 0;
139
140 nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
141 if (nr_blks < 0)
142 return nr_blks;
143
144 for (i = 0; i < nr_blks; i++) {
145 if (blks[i] != NVM_BLK_T_HOST)
146 continue;
147
148 if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
149 pr_err("nvm: too many host blks\n");
150 return -EINVAL;
151 }
152
153 ppa.g.blk = i;
154
155 s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
156 s->nr_ppas++;
157 nr_sysblk++;
158 }
159
160 return 0;
161}
162
163static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
164 struct ppa_addr *ppas, int get_free)
165{
166 struct nvm_geo *geo = &dev->geo;
167 int i, nr_blks, ret = 0;
168 u8 *blks;
169
170 s->nr_ppas = 0;
171 nr_blks = geo->blks_per_lun * geo->plane_mode;
172
173 blks = kmalloc(nr_blks, GFP_KERNEL);
174 if (!blks)
175 return -ENOMEM;
176
177 for (i = 0; i < s->nr_rows; i++) {
178 s->row = i;
179
180 ret = nvm_get_bb_tbl(dev, ppas[i], blks);
181 if (ret) {
182 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
183 ppas[i].g.ch,
184 ppas[i].g.blk);
185 goto err_get;
186 }
187
188 if (get_free)
189 ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
190 s);
191 else
192 ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
193 s);
194
195 if (ret)
196 goto err_get;
197 }
198
199err_get:
200 kfree(blks);
201 return ret;
202}
203
204/*
205 * scans a block for latest sysblk.
206 * Returns:
207 * 0 - newer sysblk not found. PPA is updated to latest page.
 208 * 1 - newer sysblk found and stored in *sblk. PPA is updated to
209 * next valid page.
210 * <0- error.
211 */
212static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
213 struct nvm_system_block *sblk)
214{
215 struct nvm_geo *geo = &dev->geo;
216 struct nvm_system_block *cur;
217 int pg, ret, found = 0;
218
 219	/* the full buffer for a flash page is allocated. Only the first part
 220	 * of it contains the system block information
221 */
222 cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
223 if (!cur)
224 return -ENOMEM;
225
226 /* perform linear scan through the block */
227 for (pg = 0; pg < dev->lps_per_blk; pg++) {
228 ppa->g.pg = ppa_to_slc(dev, pg);
229
230 ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
231 cur, geo->pfpg_size);
232 if (ret) {
233 if (ret == NVM_RSP_ERR_EMPTYPAGE) {
234 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
235 ppa->g.ch,
236 ppa->g.lun,
237 ppa->g.blk,
238 ppa->g.pg);
239 break;
240 }
241 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
242 ret,
243 ppa->g.ch,
244 ppa->g.lun,
245 ppa->g.blk,
246 ppa->g.pg);
247 break; /* if we can't read a page, continue to the
248 * next blk
249 */
250 }
251
252 if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
253 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
254 ppa->g.ch,
255 ppa->g.lun,
256 ppa->g.blk,
257 ppa->g.pg);
258 break; /* last valid page already found */
259 }
260
261 if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
262 continue;
263
264 memcpy(sblk, cur, sizeof(struct nvm_system_block));
265 found = 1;
266 }
267
268 kfree(cur);
269
270 return found;
271}
272
273static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
274 int type)
275{
276 return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
277}
278
279static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
280 struct sysblk_scan *s)
281{
282 struct nvm_geo *geo = &dev->geo;
283 struct nvm_system_block nvmsb;
284 void *buf;
285 int i, sect, ret = 0;
286 struct ppa_addr *ppas;
287
288 nvm_cpu_to_sysblk(&nvmsb, info);
289
290 buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
291 if (!buf)
292 return -ENOMEM;
293 memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
294
295 ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
296 if (!ppas) {
297 ret = -ENOMEM;
298 goto err;
299 }
300
301 /* Write and verify */
302 for (i = 0; i < s->nr_rows; i++) {
303 ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
304
305 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
306 ppas[0].g.ch,
307 ppas[0].g.lun,
308 ppas[0].g.blk,
309 ppas[0].g.pg);
310
311 /* Expand to all sectors within a flash page */
312 if (geo->sec_per_pg > 1) {
313 for (sect = 1; sect < geo->sec_per_pg; sect++) {
314 ppas[sect].ppa = ppas[0].ppa;
315 ppas[sect].g.sec = sect;
316 }
317 }
318
319 ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
320 NVM_IO_SLC_MODE, buf, geo->pfpg_size);
321 if (ret) {
322 pr_err("nvm: sysblk failed program (%u %u %u)\n",
323 ppas[0].g.ch,
324 ppas[0].g.lun,
325 ppas[0].g.blk);
326 break;
327 }
328
329 ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
330 NVM_IO_SLC_MODE, buf, geo->pfpg_size);
331 if (ret) {
332 pr_err("nvm: sysblk failed read (%u %u %u)\n",
333 ppas[0].g.ch,
334 ppas[0].g.lun,
335 ppas[0].g.blk);
336 break;
337 }
338
339 if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
340 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
341 ppas[0].g.ch,
342 ppas[0].g.lun,
343 ppas[0].g.blk);
344 ret = -EINVAL;
345 break;
346 }
347 }
348
349 kfree(ppas);
350err:
351 kfree(buf);
352
353 return ret;
354}
355
356static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
357{
358 int i, ret;
359 unsigned long nxt_blk;
360 struct ppa_addr *ppa;
361
362 for (i = 0; i < s->nr_rows; i++) {
363 nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
364 ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
365 ppa->g.pg = ppa_to_slc(dev, 0);
366
367 ret = nvm_erase_ppa(dev, ppa, 1, 0);
368 if (ret)
369 return ret;
370
371 s->act_blk[i] = nxt_blk;
372 }
373
374 return 0;
375}
376
377int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
378{
379 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
380 struct sysblk_scan s;
381 struct nvm_system_block *cur;
382 int i, j, found = 0;
383 int ret = -ENOMEM;
384
385 /*
386 * 1. setup sysblk locations
387 * 2. get bad block list
388 * 3. filter on host-specific (type 3)
389 * 4. iterate through all and find the highest seq nr.
390 * 5. return superblock information
391 */
392
393 if (!dev->ops->get_bb_tbl)
394 return -EINVAL;
395
396 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
397
398 mutex_lock(&dev->mlock);
399 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
400 if (ret)
401 goto err_sysblk;
402
403 /* no sysblocks initialized */
404 if (!s.nr_ppas)
405 goto err_sysblk;
406
407 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
408 if (!cur)
409 goto err_sysblk;
410
411 /* find the latest block across all sysblocks */
412 for (i = 0; i < s.nr_rows; i++) {
413 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
414 struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
415
416 ret = nvm_scan_block(dev, &ppa, cur);
417 if (ret > 0)
418 found = 1;
419 else if (ret < 0)
420 break;
421 }
422 }
423
424 nvm_sysblk_to_cpu(info, cur);
425
426 kfree(cur);
427err_sysblk:
428 mutex_unlock(&dev->mlock);
429
430 if (found)
431 return 1;
432 return ret;
433}
434
435int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
436{
437 /* 1. for each latest superblock
438 * 2. if room
439 * a. write new flash page entry with the updated information
440 * 3. if no room
441 * a. find next available block on lun (linear search)
442 * if none, continue to next lun
443 * if none at all, report error. also report that it wasn't
444 * possible to write to all superblocks.
 445	 *    b. write data to block.
446 */
447 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
448 struct sysblk_scan s;
449 struct nvm_system_block *cur;
450 int i, j, ppaidx, found = 0;
451 int ret = -ENOMEM;
452
453 if (!dev->ops->get_bb_tbl)
454 return -EINVAL;
455
456 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
457
458 mutex_lock(&dev->mlock);
459 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
460 if (ret)
461 goto err_sysblk;
462
463 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
464 if (!cur)
465 goto err_sysblk;
466
467 /* Get the latest sysblk for each sysblk row */
468 for (i = 0; i < s.nr_rows; i++) {
469 found = 0;
470 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
471 ppaidx = scan_ppa_idx(i, j);
472 ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
473 if (ret > 0) {
474 s.act_blk[i] = j;
475 found = 1;
476 } else if (ret < 0)
477 break;
478 }
479 }
480
481 if (!found) {
482 pr_err("nvm: no valid sysblks found to update\n");
483 ret = -EINVAL;
484 goto err_cur;
485 }
486
487 /*
488 * All sysblocks found. Check that they have same page id in their flash
489 * blocks
490 */
491 for (i = 1; i < s.nr_rows; i++) {
492 struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
493 struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
494
495 if (l.g.pg != r.g.pg) {
496 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
497 ret = -EINVAL;
498 goto err_cur;
499 }
500 }
501
502 /*
 503	 * Check that there hasn't been another update to the seqnr since we
504 * began
505 */
506 if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
507 pr_err("nvm: seq is not sequential\n");
508 ret = -EINVAL;
509 goto err_cur;
510 }
511
512 /*
 513	 * When all pages in a block have been written, a new block is selected
514 * and writing is performed on the new block.
515 */
516 if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
517 dev->lps_per_blk - 1) {
518 ret = nvm_prepare_new_sysblks(dev, &s);
519 if (ret)
520 goto err_cur;
521 }
522
523 ret = nvm_write_and_verify(dev, new, &s);
524err_cur:
525 kfree(cur);
526err_sysblk:
527 mutex_unlock(&dev->mlock);
528
529 return ret;
530}
531
532int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
533{
534 struct nvm_geo *geo = &dev->geo;
535 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
536 struct sysblk_scan s;
537 int ret;
538
539 /*
 540	 * 1. select master blocks and pick the first available blks
 541	 * 2. get bad block list
 542	 * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
543 * 4. write and verify data to block
544 */
545
546 if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
547 return -EINVAL;
548
549 if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
550 pr_err("nvm: memory does not support SLC access\n");
551 return -EINVAL;
552 }
553
554 /* Index all sysblocks and mark them as host-driven */
555 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
556
557 mutex_lock(&dev->mlock);
558 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
559 if (ret)
560 goto err_mark;
561
562 ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
563 if (ret)
564 goto err_mark;
565
566 /* Write to the first block of each row */
567 ret = nvm_write_and_verify(dev, info, &s);
568err_mark:
569 mutex_unlock(&dev->mlock);
570 return ret;
571}
572
573static int factory_nblks(int nblks)
574{
575 /* Round up to nearest BITS_PER_LONG */
576 return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
577}
578
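A quick worked example of the round-up, assuming 64-bit longs: with blks_per_lun = 1020, (1020 + 63) & ~63 = 1024, i.e. 16 whole longs, so each LUN's slice of blk_bitmap in factory_blk_offset() below starts on a word boundary.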
579static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
580{
581 int nblks = factory_nblks(geo->blks_per_lun);
582
583 return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
584 BITS_PER_LONG;
585}
586
587static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
588 u8 *blks, int nr_blks,
589 unsigned long *blk_bitmap, int flags)
590{
591 int i, lunoff;
592
593 nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
594 if (nr_blks < 0)
595 return nr_blks;
596
597 lunoff = factory_blk_offset(&dev->geo, ppa);
598
 599	/* non-set bits correspond to blocks that must be erased */
600 for (i = 0; i < nr_blks; i++) {
601 switch (blks[i]) {
602 case NVM_BLK_T_FREE:
603 if (flags & NVM_FACTORY_ERASE_ONLY_USER)
604 set_bit(i, &blk_bitmap[lunoff]);
605 break;
606 case NVM_BLK_T_HOST:
607 if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
608 set_bit(i, &blk_bitmap[lunoff]);
609 break;
610 case NVM_BLK_T_GRWN_BAD:
611 if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
612 set_bit(i, &blk_bitmap[lunoff]);
613 break;
614 default:
615 set_bit(i, &blk_bitmap[lunoff]);
616 break;
617 }
618 }
619
620 return 0;
621}
622
623static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
624 int max_ppas, unsigned long *blk_bitmap)
625{
626 struct nvm_geo *geo = &dev->geo;
627 struct ppa_addr ppa;
628 int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
629 unsigned long *offset;
630
631 while (!done) {
632 done = 1;
633 nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
634 idx = factory_blk_offset(geo, ppa);
635 offset = &blk_bitmap[idx];
636
637 blkid = find_first_zero_bit(offset, geo->blks_per_lun);
638 if (blkid >= geo->blks_per_lun)
639 continue;
640 set_bit(blkid, offset);
641
642 ppa.g.blk = blkid;
643 pr_debug("nvm: erase ppa (%u %u %u)\n",
644 ppa.g.ch,
645 ppa.g.lun,
646 ppa.g.blk);
647
648 erase_list[ppa_cnt] = ppa;
649 ppa_cnt++;
650 done = 0;
651
652 if (ppa_cnt == max_ppas)
653 return ppa_cnt;
654 }
655 }
656
657 return ppa_cnt;
658}
659
660static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
661 int flags)
662{
663 struct nvm_geo *geo = &dev->geo;
664 struct ppa_addr ppa;
665 int ch, lun, nr_blks, ret = 0;
666 u8 *blks;
667
668 nr_blks = geo->blks_per_lun * geo->plane_mode;
669 blks = kmalloc(nr_blks, GFP_KERNEL);
670 if (!blks)
671 return -ENOMEM;
672
673 nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
674 ret = nvm_get_bb_tbl(dev, ppa, blks);
675 if (ret)
676 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
 677					ppa.g.ch, ppa.g.lun);
678
679 ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
680 flags);
681 if (ret)
682 break;
683 }
684
685 kfree(blks);
686 return ret;
687}
688
689int nvm_dev_factory(struct nvm_dev *dev, int flags)
690{
691 struct nvm_geo *geo = &dev->geo;
692 struct ppa_addr *ppas;
693 int ppa_cnt, ret = -ENOMEM;
694 int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
695 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
696 struct sysblk_scan s;
697 unsigned long *blk_bitmap;
698
699 blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
700 GFP_KERNEL);
701 if (!blk_bitmap)
702 return ret;
703
704 ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
705 if (!ppas)
706 goto err_blks;
707
708 /* create list of blks to be erased */
709 ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
710 if (ret)
711 goto err_ppas;
712
 713	/* continue erasing until the list of blks is empty */
714 while ((ppa_cnt =
715 nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
716 nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
717
718 /* mark host reserved blocks free */
719 if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
720 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
721 mutex_lock(&dev->mlock);
722 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
723 if (!ret)
724 ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
725 mutex_unlock(&dev->mlock);
726 }
727err_ppas:
728 kfree(ppas);
729err_blks:
730 kfree(blk_bitmap);
731 return ret;
732}
733EXPORT_SYMBOL(nvm_dev_factory);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 76d20875503c..01035e718c1c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
666 s->iop.write_prio = 0; 666 s->iop.write_prio = 0;
667 s->iop.error = 0; 667 s->iop.error = 0;
668 s->iop.flags = 0; 668 s->iop.flags = 0;
669 s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0; 669 s->iop.flush_journal = op_is_flush(bio->bi_opf);
670 s->iop.wq = bcache_wq; 670 s->iop.wq = bcache_wq;
671 671
672 return s; 672 return s;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e0839e..5b9cf56de8ef 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
787 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 787 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
788 788
789 spin_lock_irqsave(&cache->lock, flags); 789 spin_lock_irqsave(&cache->lock, flags);
790 if (cache->need_tick_bio && 790 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
791 !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
792 bio_op(bio) != REQ_OP_DISCARD) { 791 bio_op(bio) != REQ_OP_DISCARD) {
793 pb->tick = true; 792 pb->tick = true;
794 cache->need_tick_bio = false; 793 cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
828 return to_oblock(block_nr); 827 return to_oblock(block_nr);
829} 828}
830 829
831static int bio_triggers_commit(struct cache *cache, struct bio *bio)
832{
833 return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
834}
835
836/* 830/*
837 * You must increment the deferred set whilst the prison cell is held. To 831 * You must increment the deferred set whilst the prison cell is held. To
838 * encourage this, we ask for 'cell' to be passed in. 832 * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
884{ 878{
885 unsigned long flags; 879 unsigned long flags;
886 880
887 if (!bio_triggers_commit(cache, bio)) { 881 if (!op_is_flush(bio->bi_opf)) {
888 accounted_request(cache, bio); 882 accounted_request(cache, bio);
889 return; 883 return;
890 } 884 }
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
1069 1063
1070static bool discard_or_flush(struct bio *bio) 1064static bool discard_or_flush(struct bio *bio)
1071{ 1065{
1072 return bio_op(bio) == REQ_OP_DISCARD || 1066 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1073 bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1074} 1067}
1075 1068
1076static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) 1069static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c12a9db..110982db4b48 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
699 699
700static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) 700static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
701{ 701{
702 return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) && 702 return op_is_flush(bio->bi_opf) &&
703 dm_thin_changed_this_transaction(tc->td); 703 dm_thin_changed_this_transaction(tc->td);
704} 704}
705 705
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
870 struct bio *bio; 870 struct bio *bio;
871 871
872 while ((bio = bio_list_pop(&cell->bios))) { 872 while ((bio = bio_list_pop(&cell->bios))) {
873 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || 873 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
874 bio_op(bio) == REQ_OP_DISCARD)
875 bio_list_add(&info->defer_bios, bio); 874 bio_list_add(&info->defer_bios, bio);
876 else { 875 else {
877 inc_all_io_entry(info->tc->pool, bio); 876 inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
1716 struct bio *bio; 1715 struct bio *bio;
1717 1716
1718 while ((bio = bio_list_pop(&cell->bios))) { 1717 while ((bio = bio_list_pop(&cell->bios))) {
1719 if ((bio_data_dir(bio) == WRITE) || 1718 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1720 (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || 1719 bio_op(bio) == REQ_OP_DISCARD)
1721 bio_op(bio) == REQ_OP_DISCARD))
1722 bio_list_add(&info->defer_bios, bio); 1720 bio_list_add(&info->defer_bios, bio);
1723 else { 1721 else {
1724 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; 1722 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2635 return DM_MAPIO_SUBMITTED; 2633 return DM_MAPIO_SUBMITTED;
2636 } 2634 }
2637 2635
2638 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || 2636 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2639 bio_op(bio) == REQ_OP_DISCARD) {
2640 thin_defer_bio_with_throttle(tc, bio); 2637 thin_defer_bio_with_throttle(tc, bio);
2641 return DM_MAPIO_SUBMITTED; 2638 return DM_MAPIO_SUBMITTED;
2642 } 2639 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8a3c3e32a704..138c6fa00cd5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -784,6 +784,13 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
784 return nvme_sg_io(ns, (void __user *)arg); 784 return nvme_sg_io(ns, (void __user *)arg);
785#endif 785#endif
786 default: 786 default:
787#ifdef CONFIG_NVM
788 if (ns->ndev)
789 return nvme_nvm_ioctl(ns, cmd, arg);
790#endif
791 if (is_sed_ioctl(cmd))
792 return sed_ioctl(ns->ctrl->opal_dev, cmd,
793 (void __user *) arg);
787 return -ENOTTY; 794 return -ENOTTY;
788 } 795 }
789} 796}
@@ -1051,6 +1058,28 @@ static const struct pr_ops nvme_pr_ops = {
1051 .pr_clear = nvme_pr_clear, 1058 .pr_clear = nvme_pr_clear,
1052}; 1059};
1053 1060
1061#ifdef CONFIG_BLK_SED_OPAL
1062int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
1063 bool send)
1064{
1065 struct nvme_ctrl *ctrl = data;
1066 struct nvme_command cmd;
1067
1068 memset(&cmd, 0, sizeof(cmd));
1069 if (send)
1070 cmd.common.opcode = nvme_admin_security_send;
1071 else
1072 cmd.common.opcode = nvme_admin_security_recv;
1073 cmd.common.nsid = 0;
1074 cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
1075 cmd.common.cdw10[1] = cpu_to_le32(len);
1076
1077 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
1078 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
1079}
1080EXPORT_SYMBOL_GPL(nvme_sec_submit);
1081#endif /* CONFIG_BLK_SED_OPAL */
1082
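The cdw10 packing above mirrors the NVMe Security Send/Receive layout: SECP in bits 31:24 and SPSP in bits 23:08. As a hedged example of what a TCG Opal call would encode (security protocol 0x01, with the SPSP field carrying the ComID):

/* secp = 0x01 (TCG), spsp = comid; same shift layout as above */
u32 cdw10 = (0x01u << 24) | ((u32)comid << 8);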
1054static const struct block_device_operations nvme_fops = { 1083static const struct block_device_operations nvme_fops = {
1055 .owner = THIS_MODULE, 1084 .owner = THIS_MODULE,
1056 .ioctl = nvme_ioctl, 1085 .ioctl = nvme_ioctl,
@@ -1230,6 +1259,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1230 return -EIO; 1259 return -EIO;
1231 } 1260 }
1232 1261
1262 ctrl->oacs = le16_to_cpu(id->oacs);
1233 ctrl->vid = le16_to_cpu(id->vid); 1263 ctrl->vid = le16_to_cpu(id->vid);
1234 ctrl->oncs = le16_to_cpup(&id->oncs); 1264 ctrl->oncs = le16_to_cpup(&id->oncs);
1235 atomic_set(&ctrl->abort_limit, id->acl + 1); 1265 atomic_set(&ctrl->abort_limit, id->acl + 1);
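
The new nvme_sec_submit() helper packs the command dwords per the NVMe Security Send/Receive definition: the Security Protocol (SECP) goes in bits 31:24 of CDW10, the Security Protocol Specific field (SPSP) in bits 23:8, and CDW11 carries the transfer length in bytes. As a worked example (values illustrative), a Security Receive for the TCG protocol with secp = 0x01 and spsp = 0x0001 yields:

    cdw10[0] = (0x01u << 24) | (0x0001u << 8) = 0x01000100

Bits 7:0 (the NSSF field) are left zero, which the Opal path does not use.
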
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 588d4a34c083..21cac8523bd8 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -26,6 +26,8 @@
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/lightnvm.h> 27#include <linux/lightnvm.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/sched/sysctl.h>
30#include <uapi/linux/lightnvm.h>
29 31
30enum nvme_nvm_admin_opcode { 32enum nvme_nvm_admin_opcode {
31 nvme_nvm_admin_identity = 0xe2, 33 nvme_nvm_admin_identity = 0xe2,
@@ -248,50 +250,48 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
248{ 250{
249 struct nvme_nvm_id_group *src; 251 struct nvme_nvm_id_group *src;
250 struct nvm_id_group *dst; 252 struct nvm_id_group *dst;
251 int i, end;
252
253 end = min_t(u32, 4, nvm_id->cgrps);
254
255 for (i = 0; i < end; i++) {
256 src = &nvme_nvm_id->groups[i];
257 dst = &nvm_id->groups[i];
258
259 dst->mtype = src->mtype;
260 dst->fmtype = src->fmtype;
261 dst->num_ch = src->num_ch;
262 dst->num_lun = src->num_lun;
263 dst->num_pln = src->num_pln;
264
265 dst->num_pg = le16_to_cpu(src->num_pg);
266 dst->num_blk = le16_to_cpu(src->num_blk);
267 dst->fpg_sz = le16_to_cpu(src->fpg_sz);
268 dst->csecs = le16_to_cpu(src->csecs);
269 dst->sos = le16_to_cpu(src->sos);
270
271 dst->trdt = le32_to_cpu(src->trdt);
272 dst->trdm = le32_to_cpu(src->trdm);
273 dst->tprt = le32_to_cpu(src->tprt);
274 dst->tprm = le32_to_cpu(src->tprm);
275 dst->tbet = le32_to_cpu(src->tbet);
276 dst->tbem = le32_to_cpu(src->tbem);
277 dst->mpos = le32_to_cpu(src->mpos);
278 dst->mccap = le32_to_cpu(src->mccap);
279
280 dst->cpar = le16_to_cpu(src->cpar);
281
282 if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
284 dst->lptbl.mlc.num_pairs =
285 le16_to_cpu(src->lptbl.mlc.num_pairs);
286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291 253
292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 254 if (nvme_nvm_id->cgrps != 1)
293 dst->lptbl.mlc.num_pairs); 255 return -EINVAL;
256
257 src = &nvme_nvm_id->groups[0];
258 dst = &nvm_id->grp;
259
260 dst->mtype = src->mtype;
261 dst->fmtype = src->fmtype;
262 dst->num_ch = src->num_ch;
263 dst->num_lun = src->num_lun;
264 dst->num_pln = src->num_pln;
265
266 dst->num_pg = le16_to_cpu(src->num_pg);
267 dst->num_blk = le16_to_cpu(src->num_blk);
268 dst->fpg_sz = le16_to_cpu(src->fpg_sz);
269 dst->csecs = le16_to_cpu(src->csecs);
270 dst->sos = le16_to_cpu(src->sos);
271
272 dst->trdt = le32_to_cpu(src->trdt);
273 dst->trdm = le32_to_cpu(src->trdm);
274 dst->tprt = le32_to_cpu(src->tprt);
275 dst->tprm = le32_to_cpu(src->tprm);
276 dst->tbet = le32_to_cpu(src->tbet);
277 dst->tbem = le32_to_cpu(src->tbem);
278 dst->mpos = le32_to_cpu(src->mpos);
279 dst->mccap = le32_to_cpu(src->mccap);
280
281 dst->cpar = le16_to_cpu(src->cpar);
282
283 if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
284 memcpy(dst->lptbl.id, src->lptbl.id, 8);
285 dst->lptbl.mlc.num_pairs =
286 le16_to_cpu(src->lptbl.mlc.num_pairs);
287
288 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
289 pr_err("nvm: number of MLC pairs not supported\n");
290 return -EINVAL;
294 } 291 }
292
293 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
294 dst->lptbl.mlc.num_pairs);
295 } 295 }
296 296
297 return 0; 297 return 0;
@@ -321,7 +321,6 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
321 321
322 nvm_id->ver_id = nvme_nvm_id->ver_id; 322 nvm_id->ver_id = nvme_nvm_id->ver_id;
323 nvm_id->vmnt = nvme_nvm_id->vmnt; 323 nvm_id->vmnt = nvme_nvm_id->vmnt;
324 nvm_id->cgrps = nvme_nvm_id->cgrps;
325 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); 324 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
326 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); 325 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
327 memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, 326 memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
@@ -372,7 +371,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
372 } 371 }
373 372
374 /* Transform physical address to target address space */ 373 /* Transform physical address to target address space */
375 nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb); 374 nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
376 375
377 if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) { 376 if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
378 ret = -EINTR; 377 ret = -EINTR;
@@ -485,7 +484,8 @@ static void nvme_nvm_end_io(struct request *rq, int error)
485 struct nvm_rq *rqd = rq->end_io_data; 484 struct nvm_rq *rqd = rq->end_io_data;
486 485
487 rqd->ppa_status = nvme_req(rq)->result.u64; 486 rqd->ppa_status = nvme_req(rq)->result.u64;
488 nvm_end_io(rqd, error); 487 rqd->error = error;
488 nvm_end_io(rqd);
489 489
490 kfree(nvme_req(rq)->cmd); 490 kfree(nvme_req(rq)->cmd);
491 blk_mq_free_request(rq); 491 blk_mq_free_request(rq);
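
This hunk tracks a core API change: nvm_end_io() now takes only the request, so the driver records the completion status in rqd->error (next to rqd->ppa_status) before calling it, and the LightNVM core dispatches the request to the target's rqd->end_io callback from there. The new prototype is simply:

    void nvm_end_io(struct nvm_rq *rqd);
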
@@ -586,6 +586,224 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
586 .max_phys_sect = 64, 586 .max_phys_sect = 64,
587}; 587};
588 588
589static void nvme_nvm_end_user_vio(struct request *rq, int error)
590{
591 struct completion *waiting = rq->end_io_data;
592
593 complete(waiting);
594}
595
596static int nvme_nvm_submit_user_cmd(struct request_queue *q,
597 struct nvme_ns *ns,
598 struct nvme_nvm_command *vcmd,
599 void __user *ubuf, unsigned int bufflen,
600 void __user *meta_buf, unsigned int meta_len,
601 void __user *ppa_buf, unsigned int ppa_len,
602 u32 *result, u64 *status, unsigned int timeout)
603{
604 bool write = nvme_is_write((struct nvme_command *)vcmd);
605 struct nvm_dev *dev = ns->ndev;
606 struct gendisk *disk = ns->disk;
607 struct request *rq;
608 struct bio *bio = NULL;
609 __le64 *ppa_list = NULL;
610 dma_addr_t ppa_dma;
611 __le64 *metadata = NULL;
612 dma_addr_t metadata_dma;
613 DECLARE_COMPLETION_ONSTACK(wait);
614 int ret;
615
616 rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
617 NVME_QID_ANY);
618 if (IS_ERR(rq)) {
619 ret = -ENOMEM;
620 goto err_cmd;
621 }
622
623 rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
624
625 rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
626 rq->end_io_data = &wait;
627
628 if (ppa_buf && ppa_len) {
629 ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
630 if (!ppa_list) {
631 ret = -ENOMEM;
632 goto err_rq;
633 }
634 if (copy_from_user(ppa_list, (void __user *)ppa_buf,
635 sizeof(u64) * (ppa_len + 1))) {
636 ret = -EFAULT;
637 goto err_ppa;
638 }
639 vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
640 } else {
641 vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
642 }
643
644 if (ubuf && bufflen) {
645 ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
646 if (ret)
647 goto err_ppa;
648 bio = rq->bio;
649
650 if (meta_buf && meta_len) {
651 metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
652 &metadata_dma);
653 if (!metadata) {
654 ret = -ENOMEM;
655 goto err_map;
656 }
657
658 if (write) {
659 if (copy_from_user(metadata,
660 (void __user *)meta_buf,
661 meta_len)) {
662 ret = -EFAULT;
663 goto err_meta;
664 }
665 }
666 vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
667 }
668
669 if (!disk)
670 goto submit;
671
672 bio->bi_bdev = bdget_disk(disk, 0);
673 if (!bio->bi_bdev) {
674 ret = -ENODEV;
675 goto err_meta;
676 }
677 }
678
679submit:
680 blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
681
682 wait_for_completion_io(&wait);
683
684 ret = nvme_error_status(rq->errors);
685 if (result)
686 *result = rq->errors & 0x7ff;
687 if (status)
688 *status = le64_to_cpu(nvme_req(rq)->result.u64);
689
690 if (metadata && !ret && !write) {
691 if (copy_to_user(meta_buf, (void *)metadata, meta_len))
692 ret = -EFAULT;
693 }
694err_meta:
695 if (meta_buf && meta_len)
696 dma_pool_free(dev->dma_pool, metadata, metadata_dma);
697err_map:
698 if (bio) {
699 if (disk && bio->bi_bdev)
700 bdput(bio->bi_bdev);
701 blk_rq_unmap_user(bio);
702 }
703err_ppa:
704 if (ppa_buf && ppa_len)
705 dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
706err_rq:
707 blk_mq_free_request(rq);
708err_cmd:
709 return ret;
710}
711
712static int nvme_nvm_submit_vio(struct nvme_ns *ns,
713 struct nvm_user_vio __user *uvio)
714{
715 struct nvm_user_vio vio;
716 struct nvme_nvm_command c;
717 unsigned int length;
718 int ret;
719
720 if (copy_from_user(&vio, uvio, sizeof(vio)))
721 return -EFAULT;
722 if (vio.flags)
723 return -EINVAL;
724
725 memset(&c, 0, sizeof(c));
726 c.ph_rw.opcode = vio.opcode;
727 c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
728 c.ph_rw.control = cpu_to_le16(vio.control);
729 c.ph_rw.length = cpu_to_le16(vio.nppas);
730
731 length = (vio.nppas + 1) << ns->lba_shift;
732
733 ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
734 (void __user *)(uintptr_t)vio.addr, length,
735 (void __user *)(uintptr_t)vio.metadata,
736 vio.metadata_len,
737 (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
738 &vio.result, &vio.status, 0);
739
740 if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
741 return -EFAULT;
742
743 return ret;
744}
745
746static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
747 struct nvm_passthru_vio __user *uvcmd)
748{
749 struct nvm_passthru_vio vcmd;
750 struct nvme_nvm_command c;
751 struct request_queue *q;
752 unsigned int timeout = 0;
753 int ret;
754
755 if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
756 return -EFAULT;
757 if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
758 return -EACCES;
759 if (vcmd.flags)
760 return -EINVAL;
761
762 memset(&c, 0, sizeof(c));
763 c.common.opcode = vcmd.opcode;
764 c.common.nsid = cpu_to_le32(ns->ns_id);
765 c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
766 c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
767 /* cdw11-12 */
768 c.ph_rw.length = cpu_to_le16(vcmd.nppas);
769 c.ph_rw.control = cpu_to_le16(vcmd.control);
770 c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
771 c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
772 c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);
773
774 if (vcmd.timeout_ms)
775 timeout = msecs_to_jiffies(vcmd.timeout_ms);
776
777 q = admin ? ns->ctrl->admin_q : ns->queue;
778
779 ret = nvme_nvm_submit_user_cmd(q, ns,
780 (struct nvme_nvm_command *)&c,
781 (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
782 (void __user *)(uintptr_t)vcmd.metadata,
783 vcmd.metadata_len,
784 (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
785 &vcmd.result, &vcmd.status, timeout);
786
787 if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
788 return -EFAULT;
789
790 return ret;
791}
792
793int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
794{
795 switch (cmd) {
796 case NVME_NVM_IOCTL_ADMIN_VIO:
797 return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
798 case NVME_NVM_IOCTL_IO_VIO:
799 return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
800 case NVME_NVM_IOCTL_SUBMIT_VIO:
801 return nvme_nvm_submit_vio(ns, (void __user *)arg);
802 default:
803 return -ENOTTY;
804 }
805}
806
589int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) 807int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
590{ 808{
591 struct request_queue *q = ns->queue; 809 struct request_queue *q = ns->queue;
@@ -622,7 +840,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
622 return 0; 840 return 0;
623 841
624 id = &ndev->identity; 842 id = &ndev->identity;
625 grp = &id->groups[0]; 843 grp = &id->grp;
626 attr = &dattr->attr; 844 attr = &dattr->attr;
627 845
628 if (strcmp(attr->name, "version") == 0) { 846 if (strcmp(attr->name, "version") == 0) {
@@ -633,10 +851,9 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
633 return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); 851 return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
634 } else if (strcmp(attr->name, "device_mode") == 0) { 852 } else if (strcmp(attr->name, "device_mode") == 0) {
635 return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); 853 return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
854 /* kept for compatibility */
636 } else if (strcmp(attr->name, "media_manager") == 0) { 855 } else if (strcmp(attr->name, "media_manager") == 0) {
637 if (!ndev->mt) 856 return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
638 return scnprintf(page, PAGE_SIZE, "%s\n", "none");
639 return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
640 } else if (strcmp(attr->name, "ppa_format") == 0) { 857 } else if (strcmp(attr->name, "ppa_format") == 0) {
641 return scnprintf(page, PAGE_SIZE, 858 return scnprintf(page, PAGE_SIZE,
642 "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 859 "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index aead6d08ed2c..14cfc6f7facb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,6 +19,7 @@
19#include <linux/kref.h> 19#include <linux/kref.h>
20#include <linux/blk-mq.h> 20#include <linux/blk-mq.h>
21#include <linux/lightnvm.h> 21#include <linux/lightnvm.h>
22#include <linux/sed-opal.h>
22 23
23enum { 24enum {
24 /* 25 /*
@@ -125,6 +126,8 @@ struct nvme_ctrl {
125 struct list_head node; 126 struct list_head node;
126 struct ida ns_ida; 127 struct ida ns_ida;
127 128
129 struct opal_dev *opal_dev;
130
128 char name[12]; 131 char name[12];
129 char serial[20]; 132 char serial[20];
130 char model[40]; 133 char model[40];
@@ -137,6 +140,7 @@ struct nvme_ctrl {
137 u32 max_hw_sectors; 140 u32 max_hw_sectors;
138 u16 oncs; 141 u16 oncs;
139 u16 vid; 142 u16 vid;
143 u16 oacs;
140 atomic_t abort_limit; 144 atomic_t abort_limit;
141 u8 event_limit; 145 u8 event_limit;
142 u8 vwc; 146 u8 vwc;
@@ -267,6 +271,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl);
267void nvme_queue_scan(struct nvme_ctrl *ctrl); 271void nvme_queue_scan(struct nvme_ctrl *ctrl);
268void nvme_remove_namespaces(struct nvme_ctrl *ctrl); 272void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
269 273
274int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
275 bool send);
276
270#define NVME_NR_AERS 1 277#define NVME_NR_AERS 1
271void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 278void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
272 union nvme_result *res); 279 union nvme_result *res);
@@ -318,6 +325,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
318void nvme_nvm_unregister(struct nvme_ns *ns); 325void nvme_nvm_unregister(struct nvme_ns *ns);
319int nvme_nvm_register_sysfs(struct nvme_ns *ns); 326int nvme_nvm_register_sysfs(struct nvme_ns *ns);
320void nvme_nvm_unregister_sysfs(struct nvme_ns *ns); 327void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
328int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
321#else 329#else
322static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, 330static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
323 int node) 331 int node)
@@ -335,6 +343,11 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
335{ 343{
336 return 0; 344 return 0;
337} 345}
346static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
347 unsigned long arg)
348{
349 return -ENOTTY;
350}
338#endif /* CONFIG_NVM */ 351#endif /* CONFIG_NVM */
339 352
340static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) 353static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3faefabf339c..d67d0d0a3bc0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -43,6 +43,7 @@
43#include <linux/types.h> 43#include <linux/types.h>
44#include <linux/io-64-nonatomic-lo-hi.h> 44#include <linux/io-64-nonatomic-lo-hi.h>
45#include <asm/unaligned.h> 45#include <asm/unaligned.h>
46#include <linux/sed-opal.h>
46 47
47#include "nvme.h" 48#include "nvme.h"
48 49
@@ -895,12 +896,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
895 return BLK_EH_HANDLED; 896 return BLK_EH_HANDLED;
896 } 897 }
897 898
898 iod->aborted = 1;
899
900 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 899 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
901 atomic_inc(&dev->ctrl.abort_limit); 900 atomic_inc(&dev->ctrl.abort_limit);
902 return BLK_EH_RESET_TIMER; 901 return BLK_EH_RESET_TIMER;
903 } 902 }
903 iod->aborted = 1;
904 904
905 memset(&cmd, 0, sizeof(cmd)); 905 memset(&cmd, 0, sizeof(cmd));
906 cmd.abort.opcode = nvme_admin_abort_cmd; 906 cmd.abort.opcode = nvme_admin_abort_cmd;
@@ -1178,6 +1178,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1178 dev->admin_tagset.timeout = ADMIN_TIMEOUT; 1178 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1179 dev->admin_tagset.numa_node = dev_to_node(dev->dev); 1179 dev->admin_tagset.numa_node = dev_to_node(dev->dev);
1180 dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 1180 dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
1181 dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
1181 dev->admin_tagset.driver_data = dev; 1182 dev->admin_tagset.driver_data = dev;
1182 1183
1183 if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 1184 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
@@ -1738,6 +1739,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
1738 if (dev->ctrl.admin_q) 1739 if (dev->ctrl.admin_q)
1739 blk_put_queue(dev->ctrl.admin_q); 1740 blk_put_queue(dev->ctrl.admin_q);
1740 kfree(dev->queues); 1741 kfree(dev->queues);
1742 kfree(dev->ctrl.opal_dev);
1741 kfree(dev); 1743 kfree(dev);
1742} 1744}
1743 1745
@@ -1754,6 +1756,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
1754static void nvme_reset_work(struct work_struct *work) 1756static void nvme_reset_work(struct work_struct *work)
1755{ 1757{
1756 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1758 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
1759 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
1757 int result = -ENODEV; 1760 int result = -ENODEV;
1758 1761
1759 if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1762 if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
@@ -1786,6 +1789,14 @@ static void nvme_reset_work(struct work_struct *work)
1786 if (result) 1789 if (result)
1787 goto out; 1790 goto out;
1788 1791
1792 if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) {
1793 dev->ctrl.opal_dev =
1794 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
1795 }
1796
1797 if (was_suspend)
1798 opal_unlock_from_suspend(dev->ctrl.opal_dev);
1799
1789 result = nvme_setup_io_queues(dev); 1800 result = nvme_setup_io_queues(dev);
1790 if (result) 1801 if (result)
1791 goto out; 1802 goto out;
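
The reset-work hunks wire the pieces together: if the controller advertises security send/receive in OACS (the new ctrl->oacs field from core.c), an opal_dev is allocated with nvme_sec_submit() as its transport, and opal_unlock_from_suspend() replays any cached unlock commands when the reset follows a resume (was_suspend is inferred from the normal-shutdown value left in CC by the suspend path). The transport contract comes from the sed-opal API; assuming the callback typedef from include/linux/sed-opal.h in this series, the registered function must match

    typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp,
    				void *buffer, size_t len, bool send);

which is exactly the signature exported as nvme_sec_submit() above.
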
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 94352e4df831..013bfe049a48 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -117,7 +117,7 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
117 unsigned int clearing, int slot); 117 unsigned int clearing, int slot);
118static int sr_packet(struct cdrom_device_info *, struct packet_command *); 118static int sr_packet(struct cdrom_device_info *, struct packet_command *);
119 119
120static struct cdrom_device_ops sr_dops = { 120static const struct cdrom_device_ops sr_dops = {
121 .open = sr_open, 121 .open = sr_open,
122 .release = sr_release, 122 .release = sr_release,
123 .drive_status = sr_drive_status, 123 .drive_status = sr_drive_status,