diff options
author | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2011-03-21 09:48:57 -0400 |
---|---|---|
committer | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2011-11-04 15:53:01 -0400 |
commit | 6c7d49455ceb63064f992347d9185ff5bf43497a (patch) | |
tree | 8a5fde81a68cd41c12079d8c2baf7ea1cf3e40fe /drivers/block | |
parent | 9d4af1b7796ba02b73a79a8694399e5a3cd1c55d (diff) |
NVMe: Change the definition of nvme_user_io
The read and write commands don't define a 'result', so there's no need
to copy it back to userspace.
Remove the ability of the ioctl to submit commands to a different
namespace; it's just asking for trouble, and the use case I have in mind
will be addressed through a different ioctl in the future. That removes
the need for both the block_shift and nsid arguments.
Check that the opcode is one of 'read' or 'write'. More opcodes may
be added in the future, but they will need a different structure
definition.
The nblocks field is redefined to be 0-based. This allows the user to
request the full 65536 blocks.
Don't byteswap the reftag, apptag and appmask. Martin Petersen tells
me these are calculated in big-endian and are transmitted to the device
in big-endian.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/nvme.c | 27 |
1 files changed, 17 insertions, 10 deletions
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index d0b52622e261..90a96ec8a596 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c | |||
@@ -1035,29 +1035,37 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) | |||
1035 | struct nvme_user_io io; | 1035 | struct nvme_user_io io; |
1036 | struct nvme_command c; | 1036 | struct nvme_command c; |
1037 | unsigned length; | 1037 | unsigned length; |
1038 | u32 result; | ||
1039 | int nents, status; | 1038 | int nents, status; |
1040 | struct scatterlist *sg; | 1039 | struct scatterlist *sg; |
1041 | struct nvme_prps *prps; | 1040 | struct nvme_prps *prps; |
1042 | 1041 | ||
1043 | if (copy_from_user(&io, uio, sizeof(io))) | 1042 | if (copy_from_user(&io, uio, sizeof(io))) |
1044 | return -EFAULT; | 1043 | return -EFAULT; |
1045 | length = io.nblocks << io.block_shift; | 1044 | length = (io.nblocks + 1) << ns->lba_shift; |
1046 | nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg); | 1045 | |
1046 | switch (io.opcode) { | ||
1047 | case nvme_cmd_write: | ||
1048 | case nvme_cmd_read: | ||
1049 | nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, | ||
1050 | length, &sg); | ||
1051 | default: | ||
1052 | return -EFAULT; | ||
1053 | } | ||
1054 | |||
1047 | if (nents < 0) | 1055 | if (nents < 0) |
1048 | return nents; | 1056 | return nents; |
1049 | 1057 | ||
1050 | memset(&c, 0, sizeof(c)); | 1058 | memset(&c, 0, sizeof(c)); |
1051 | c.rw.opcode = io.opcode; | 1059 | c.rw.opcode = io.opcode; |
1052 | c.rw.flags = io.flags; | 1060 | c.rw.flags = io.flags; |
1053 | c.rw.nsid = cpu_to_le32(io.nsid); | 1061 | c.rw.nsid = cpu_to_le32(ns->ns_id); |
1054 | c.rw.slba = cpu_to_le64(io.slba); | 1062 | c.rw.slba = cpu_to_le64(io.slba); |
1055 | c.rw.length = cpu_to_le16(io.nblocks - 1); | 1063 | c.rw.length = cpu_to_le16(io.nblocks); |
1056 | c.rw.control = cpu_to_le16(io.control); | 1064 | c.rw.control = cpu_to_le16(io.control); |
1057 | c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); | 1065 | c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); |
1058 | c.rw.reftag = cpu_to_le32(io.reftag); /* XXX: endian? */ | 1066 | c.rw.reftag = io.reftag; |
1059 | c.rw.apptag = cpu_to_le16(io.apptag); | 1067 | c.rw.apptag = io.apptag; |
1060 | c.rw.appmask = cpu_to_le16(io.appmask); | 1068 | c.rw.appmask = io.appmask; |
1061 | /* XXX: metadata */ | 1069 | /* XXX: metadata */ |
1062 | prps = nvme_setup_prps(dev, &c.common, sg, length); | 1070 | prps = nvme_setup_prps(dev, &c.common, sg, length); |
1063 | 1071 | ||
@@ -1069,11 +1077,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) | |||
1069 | * additional races since q_lock already protects against other CPUs. | 1077 | * additional races since q_lock already protects against other CPUs. |
1070 | */ | 1078 | */ |
1071 | put_nvmeq(nvmeq); | 1079 | put_nvmeq(nvmeq); |
1072 | status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT); | 1080 | status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT); |
1073 | 1081 | ||
1074 | nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents); | 1082 | nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents); |
1075 | nvme_free_prps(dev, prps); | 1083 | nvme_free_prps(dev, prps); |
1076 | put_user(result, &uio->result); | ||
1077 | return status; | 1084 | return status; |
1078 | } | 1085 | } |
1079 | 1086 | ||