author    Keith Busch <keith.busch@intel.com>    2012-11-09 18:33:05 -0500
committer Matthew Wilcox <matthew.r.wilcox@intel.com>    2013-03-26 14:18:58 -0400
commit    0e5e4f0e56aca0df1d5648db0be9028bd573b25c (patch)
tree      a83ab74e90bae5e0a20e6fd9dd03924cac1f3db9 /drivers/block/nvme.c
parent    a12183c62717ac4579319189a00f5883a18dff08 (diff)
NVMe: Add discard support for capable devices
This adds discard support to block queues if the nvme device is capable of
deallocating blocks, as indicated by the controller's optional command
support. A discard-flagged bio request will submit an NVMe deallocate Data
Set Management command for the requested blocks.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
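For context, discards reach this driver from a filesystem mounted with
-o discard, from fstrim(8), or directly through the long-standing BLKDISCARD
ioctl, which the block layer turns into a REQ_DISCARD-flagged bio. A minimal
userspace sketch to exercise the new path (the device path and byte range are
illustrative, not part of the patch):

/* Issue a discard of the first 1 MiB of a device via BLKDISCARD.
 * The argument is { offset, length } in bytes; write access is required.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* illustrative offset/length */
	int fd = open("/dev/nvme0n1", O_WRONLY);	/* illustrative path */

	if (fd < 0 || ioctl(fd, BLKDISCARD, range) < 0) {
		perror("BLKDISCARD");
		return 1;
	}
	return 0;
}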
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--  drivers/block/nvme.c | 60
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 9dcefe40380b..26e266072079 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -80,6 +80,7 @@ struct nvme_dev {
 	char model[40];
 	char firmware_rev[8];
 	u32 max_hw_sectors;
+	u16 oncs;
 };
 
 /*
@@ -510,6 +511,44 @@ static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
 	return length;
 }
 
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+		struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+	struct nvme_dsm_range *range;
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+							&iod->first_dma);
+	if (!range)
+		return -ENOMEM;
+
+	iod_list(iod)[0] = (__le64 *)range;
+	iod->npages = 0;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.command_id = cmdid;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
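The 16-byte descriptor filled in above is the Dataset Management range format
from the NVMe specification; the driver's header declares it along these lines
(a sketch for reference; all fields are little-endian on the wire):

/* Dataset Management range descriptor, 16 bytes per range. */
struct nvme_dsm_range {
	__le32	cattr;	/* context attributes; 0 = no hints */
	__le32	nlb;	/* number of logical blocks to deallocate */
	__le64	slba;	/* starting LBA */
};

Note the shift arithmetic when it is filled: bio->bi_size is in bytes and
bio->bi_sector counts 512-byte sectors, so for a namespace formatted with
4096-byte LBAs (lba_shift = 12), a discard of 8192 bytes at sector 16 becomes
nlb = 8192 >> 12 = 2 and slba = 16 >> (12 - 9) = 2.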
@@ -567,6 +606,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	if (unlikely(cmdid < 0))
 		goto free_iod;
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+		if (result)
+			goto free_cmdid;
+		return result;
+	}
 	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
@@ -1347,6 +1392,16 @@ static void nvme_put_ns_idx(int index)
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
 {
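With these limits set, userspace of this era can observe them through sysfs
(queue/discard_granularity and related files) or the BLKDISCARDZEROES ioctl,
which reports the queue's discard_zeroes_data value. A minimal sketch (device
path illustrative):

/* Query whether discarded blocks are guaranteed to read back as zeroes;
 * this driver sets discard_zeroes_data = 0, so expect 0 here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKDISCARDZEROES */

int main(void)
{
	unsigned int zeroes = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* illustrative path */

	if (fd < 0 || ioctl(fd, BLKDISCARDZEROES, &zeroes) < 0) {
		perror("BLKDISCARDZEROES");
		return 1;
	}
	printf("discard_zeroes_data: %u\n", zeroes);
	return 0;
}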
@@ -1366,7 +1421,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -1392,6 +1446,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
 	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
+
 	return ns;
 
  out_free_queue:
@@ -1520,6 +1577,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
+	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
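The oncs word captured here is the Optional NVM Command Support field of the
Identify Controller structure; NVME_CTRL_ONCS_DSM, tested in nvme_alloc_ns()
above, corresponds to bit 2. For reference, a sketch of the relevant bit
definitions per the NVMe 1.x specification (the kernel header defines matching
constants):

/* Optional NVM Command Support (ONCS) bits from Identify Controller. */
enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,	/* Dataset Management */
};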