diff options
 drivers/block/nvme-core.c | 71 +++++++++++++++++++++++++++++++++++++++++++---
 include/linux/nvme.h      |  1 +
 2 files changed, 68 insertions(+), 4 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 4151a3d26e2d..5a3f2235892a 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1240,13 +1240,19 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	struct nvme_queue *nvmeq;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length;
-	int status;
-	struct nvme_iod *iod;
+	unsigned length, meta_len;
+	int status, i;
+	struct nvme_iod *iod, *meta_iod = NULL;
+	dma_addr_t meta_dma_addr;
+	void *meta, *uninitialized_var(meta_mem);
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
 	length = (io.nblocks + 1) << ns->lba_shift;
+	meta_len = (io.nblocks + 1) * ns->ms;
+
+	if (meta_len && ((io.metadata & 3) || !io.metadata))
+		return -EINVAL;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
@@ -1272,7 +1278,38 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.reftag = cpu_to_le32(io.reftag);
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
-	/* XXX: metadata */
+
+	if (meta_len) {
+		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+		if (IS_ERR(meta_iod)) {
+			status = PTR_ERR(meta_iod);
+			meta_iod = NULL;
+			goto unmap;
+		}
+
+		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+						&meta_dma_addr, GFP_KERNEL);
+		if (!meta_mem) {
+			status = -ENOMEM;
+			goto unmap;
+		}
+
+		if (io.opcode & 1) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta_mem + meta_offset, meta,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		c.rw.metadata = cpu_to_le64(meta_dma_addr);
+	}
+
 	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
 
 	nvmeq = get_nvmeq(dev);
@@ -1288,8 +1325,33 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
 
+	if (meta_len) {
+		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta, meta_mem + meta_offset,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+								meta_dma_addr);
+	}
+
+ unmap:
 	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
+
+	if (meta_iod) {
+		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+		nvme_free_iod(dev, meta_iod);
+	}
+
 	return status;
 }
 
@@ -1486,6 +1548,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->disk = disk;
 	lbaf = id->flbas & 0xf;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index af29b0e0b092..971ef086ed63 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -552,6 +552,7 @@ struct nvme_ns {
 
 	int ns_id;
 	int lba_shift;
+	int ms;
 	u64 mode_select_num_blocks;
 	u32 mode_select_block_len;
 };