aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/block/nvme.c76
1 file changed, 43 insertions(+), 33 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 705f66ebd15f..b77894a75855 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -812,6 +812,34 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
812 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 812 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
813} 813}
814 814
815static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
816 dma_addr_t dma_addr)
817{
818 struct nvme_command c;
819
820 memset(&c, 0, sizeof(c));
821 c.identify.opcode = nvme_admin_identify;
822 c.identify.nsid = cpu_to_le32(nsid);
823 c.identify.prp1 = cpu_to_le64(dma_addr);
824 c.identify.cns = cpu_to_le32(cns);
825
826 return nvme_submit_admin_cmd(dev, &c, NULL);
827}
828
829static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
830 unsigned dword11, dma_addr_t dma_addr, u32 *result)
831{
832 struct nvme_command c;
833
834 memset(&c, 0, sizeof(c));
835 c.features.opcode = nvme_admin_get_features;
836 c.features.prp1 = cpu_to_le64(dma_addr);
837 c.features.fid = cpu_to_le32(fid);
838 c.features.dword11 = cpu_to_le32(dword11);
839
840 return nvme_submit_admin_cmd(dev, &c, result);
841}
842
815static void nvme_free_queue(struct nvme_dev *dev, int qid) 843static void nvme_free_queue(struct nvme_dev *dev, int qid)
816{ 844{
817 struct nvme_queue *nvmeq = dev->queues[qid]; 845 struct nvme_queue *nvmeq = dev->queues[qid];
@@ -1318,15 +1346,10 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 {
 	int status;
 	u32 result;
-	struct nvme_command c;
 	u32 q_count = (count - 1) | ((count - 1) << 16);
 
-	memset(&c, 0, sizeof(c));
-	c.features.opcode = nvme_admin_get_features;
-	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
-	c.features.dword11 = cpu_to_le32(q_count);
-
-	status = nvme_submit_admin_cmd(dev, &c, &result);
+	status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+								&result);
 	if (status)
 		return -EIO;
 	return min(result & 0xffff, result >> 16) + 1;
@@ -1400,65 +1423,51 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
 	int res, nn, i;
 	struct nvme_ns *ns, *next;
 	struct nvme_id_ctrl *ctrl;
-	void *id;
+	struct nvme_id_ns *id_ns;
+	void *mem;
 	dma_addr_t dma_addr;
-	struct nvme_command cid, crt;
 
 	res = nvme_setup_io_queues(dev);
 	if (res)
 		return res;
 
-	/* XXX: Switch to a SG list once prp2 works */
-	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
 								GFP_KERNEL);
 
-	memset(&cid, 0, sizeof(cid));
-	cid.identify.opcode = nvme_admin_identify;
-	cid.identify.nsid = 0;
-	cid.identify.prp1 = cpu_to_le64(dma_addr);
-	cid.identify.cns = cpu_to_le32(1);
-
-	res = nvme_submit_admin_cmd(dev, &cid, NULL);
+	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		res = -EIO;
 		goto out_free;
 	}
 
-	ctrl = id;
+	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
 
-	cid.identify.cns = 0;
-	memset(&crt, 0, sizeof(crt));
-	crt.features.opcode = nvme_admin_get_features;
-	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
-	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);
-
+	id_ns = mem;
 	for (i = 0; i <= nn; i++) {
-		cid.identify.nsid = cpu_to_le32(i);
-		res = nvme_submit_admin_cmd(dev, &cid, NULL);
+		res = nvme_identify(dev, i, 0, dma_addr);
 		if (res)
 			continue;
 
-		if (((struct nvme_id_ns *)id)->ncap == 0)
+		if (id_ns->ncap == 0)
 			continue;
 
-		crt.features.nsid = cpu_to_le32(i);
-		res = nvme_submit_admin_cmd(dev, &crt, NULL);
+		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+							dma_addr + 4096, NULL);
 		if (res)
 			continue;
 
-		ns = nvme_alloc_ns(dev, i, id, id + 4096);
+		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
 		if (ns)
 			list_add_tail(&ns->list, &dev->namespaces);
 	}
 	list_for_each_entry(ns, &dev->namespaces, list)
 		add_disk(ns->disk);
 
-	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
-	return 0;
+	goto out;
 
  out_free:
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
@@ -1466,6 +1475,7 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
 		nvme_ns_free(ns);
 	}
 
+ out:
 	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
 	return res;
 }