author     Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-05-12 13:51:41 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:53:03 -0400
commit     b77954cbddff28d55a36fad3c16f4daebb0f01df
tree       94f992d9c3390b61e11209f7b0e31440f67ec9c4
parent     5aff9382ddc8aac6eb0c70ffbb351652d71da69a
NVMe: Handle failures from memory allocations in nvme_setup_prps
If any of the memory allocations in nvme_setup_prps fail, handle the failure by
modifying the passed-in data length to reflect the number of bytes we are
actually able to send. Also allow the caller to specify the GFP flags it
needs; for user-initiated commands, we can use GFP_KERNEL allocations.

The various callers are updated to handle this possibility; the main
I/O path is already prepared for short transfers, since they can happen
anyway when nvme_map_bio is unable to map all the segments of the I/O.
The other callers return -ENOMEM instead of doing partial I/Os.
Reported-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
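For reference, a minimal caller-side sketch of the new convention (illustrative
only, not part of the patch; the names are taken from the admin-command hunk
below): the caller passes its transfer length by address along with the GFP
flags it can tolerate, and treats a shortened length on return as an
allocation failure rather than issuing a partial I/O.

	/* Sketch of the new nvme_setup_prps calling convention. */
	int tmplen = length;		/* bytes the caller wants to transfer */

	prps = nvme_setup_prps(dev, &cmd->common, sg, &tmplen, GFP_KERNEL);
	if (tmplen != length)		/* an allocation failed part-way through */
		err = -ENOMEM;		/* synchronous callers refuse partial I/O */
	else
		err = nvme_submit_admin_cmd(dev, cmd, NULL);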
 drivers/block/nvme.c | 56 +++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 41 insertions(+), 15 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 79012c53ae9c..ddc21ba24a70 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -329,9 +329,11 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 /* length is in bytes */
 static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 					struct nvme_common_command *cmd,
-					struct scatterlist *sg, int length)
+					struct scatterlist *sg, int *len,
+					gfp_t gfp)
 {
 	struct dma_pool *pool;
+	int length = *len;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
 	int offset = offset_in_page(dma_addr);
@@ -361,7 +363,12 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 
 	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
 	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
-	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
+	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
+	if (!prps) {
+		cmd->prp2 = cpu_to_le64(dma_addr);
+		*len = (*len - length) + PAGE_SIZE;
+		return prps;
+	}
 	prp_page = 0;
 	if (nprps <= (256 / 8)) {
 		pool = dev->prp_small_pool;
@@ -371,7 +378,13 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 		prps->npages = npages;
 	}
 
-	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+	if (!prp_list) {
+		cmd->prp2 = cpu_to_le64(dma_addr);
+		*len = (*len - length) + PAGE_SIZE;
+		kfree(prps);
+		return NULL;
+	}
 	prps->list[prp_page++] = prp_list;
 	prps->first_dma = prp_dma;
 	cmd->prp2 = cpu_to_le64(prp_dma);
@@ -379,7 +392,11 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 	for (;;) {
 		if (i == PAGE_SIZE / 8) {
 			__le64 *old_prp_list = prp_list;
-			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+			if (!prp_list) {
+				*len = (*len - length);
+				return prps;
+			}
 			prps->list[prp_page++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -525,7 +542,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.command_id = cmdid;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
-								length);
+								&length, GFP_ATOMIC);
 	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
@@ -1009,15 +1026,18 @@ static int nvme_submit_user_admin_command(struct nvme_dev *dev,
 					unsigned long addr, unsigned length,
 					struct nvme_command *cmd)
 {
-	int err, nents;
+	int err, nents, tmplen = length;
 	struct scatterlist *sg;
 	struct nvme_prps *prps;
 
 	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
 	if (nents < 0)
 		return nents;
-	prps = nvme_setup_prps(dev, &cmd->common, sg, length);
-	err = nvme_submit_admin_cmd(dev, cmd, NULL);
+	prps = nvme_setup_prps(dev, &cmd->common, sg, &tmplen, GFP_KERNEL);
+	if (tmplen != length)
+		err = -ENOMEM;
+	else
+		err = nvme_submit_admin_cmd(dev, cmd, NULL);
 	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
 	nvme_free_prps(dev, prps);
 	return err ? -EIO : 0;
@@ -1086,7 +1106,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.apptag = io.apptag;
 	c.rw.appmask = io.appmask;
 	/* XXX: metadata */
-	prps = nvme_setup_prps(dev, &c.common, sg, length);
+	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
 
 	nvmeq = get_nvmeq(ns);
 	/*
@@ -1096,7 +1116,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	 * additional races since q_lock already protects against other CPUs.
 	 */
 	put_nvmeq(nvmeq);
-	status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);
+	if (length != (io.nblocks + 1) << ns->lba_shift)
+		status = -ENOMEM;
+	else
+		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);
 
 	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
 	nvme_free_prps(dev, prps);
@@ -1109,7 +1132,7 @@ static int nvme_download_firmware(struct nvme_ns *ns,
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_dlfw dlfw;
 	struct nvme_command c;
-	int nents, status;
+	int nents, status, length;
 	struct scatterlist *sg;
 	struct nvme_prps *prps;
 
@@ -1117,8 +1140,9 @@ static int nvme_download_firmware(struct nvme_ns *ns,
 		return -EFAULT;
 	if (dlfw.length >= (1 << 30))
 		return -EINVAL;
+	length = dlfw.length * 4;
 
-	nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
+	nents = nvme_map_user_pages(dev, 1, dlfw.addr, length, &sg);
 	if (nents < 0)
 		return nents;
 
@@ -1126,9 +1150,11 @@ static int nvme_download_firmware(struct nvme_ns *ns,
 	c.dlfw.opcode = nvme_admin_download_fw;
 	c.dlfw.numd = cpu_to_le32(dlfw.length);
 	c.dlfw.offset = cpu_to_le32(dlfw.offset);
-	prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);
-
-	status = nvme_submit_admin_cmd(dev, &c, NULL);
+	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
+	if (length != dlfw.length * 4)
+		status = -ENOMEM;
+	else
+		status = nvme_submit_admin_cmd(dev, &c, NULL);
 	nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
 	nvme_free_prps(dev, prps);
 	return status;