diff options
author | Chong Yuan <chong.yuan@memblaze.com> | 2015-03-26 21:21:32 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-03-31 12:36:41 -0400 |
commit | fda631ffe5422424579e1649e04cc468d0215b85 (patch) | |
tree | 697fe528b9dc7b31c6a1350c18042c74cb59338f /drivers/block | |
parent | 6df3dbc83fb8043a5975d75970d296d6d14f7273 (diff) |
NVMe: embedded iod mask cleanup
Remove unused mask in nvme_alloc_iod
Signed-off-by: Chong Yuan <chong.yuan@memblaze.com>
Reviewed-by: Wenbo Wang <wenbo.wang@memblaze.com>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/nvme-core.c | 7 |
1 file changed, 3 insertions, 4 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ee83554c28ba..7ed618125c27 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -152,6 +152,7 @@ struct nvme_cmd_info { | |||
152 | */ | 152 | */ |
153 | #define NVME_INT_PAGES 2 | 153 | #define NVME_INT_PAGES 2 |
154 | #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size) | 154 | #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size) |
155 | #define NVME_INT_MASK 0x01 | ||
155 | 156 | ||
156 | /* | 157 | /* |
157 | * Will slightly overestimate the number of pages needed. This is OK | 158 | * Will slightly overestimate the number of pages needed. This is OK |
@@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod) | |||
257 | */ | 258 | */ |
258 | static bool iod_should_kfree(struct nvme_iod *iod) | 259 | static bool iod_should_kfree(struct nvme_iod *iod) |
259 | { | 260 | { |
260 | return (iod->private & 0x01) == 0; | 261 | return (iod->private & NVME_INT_MASK) == 0; |
261 | } | 262 | } |
262 | 263 | ||
263 | /* Special values must be less than 0x1000 */ | 264 | /* Special values must be less than 0x1000 */ |
@@ -432,7 +433,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev, | |||
432 | { | 433 | { |
433 | unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) : | 434 | unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) : |
434 | sizeof(struct nvme_dsm_range); | 435 | sizeof(struct nvme_dsm_range); |
435 | unsigned long mask = 0; | ||
436 | struct nvme_iod *iod; | 436 | struct nvme_iod *iod; |
437 | 437 | ||
438 | if (rq->nr_phys_segments <= NVME_INT_PAGES && | 438 | if (rq->nr_phys_segments <= NVME_INT_PAGES && |
@@ -440,9 +440,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev, | |||
440 | struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq); | 440 | struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq); |
441 | 441 | ||
442 | iod = cmd->iod; | 442 | iod = cmd->iod; |
443 | mask = 0x01; | ||
444 | iod_init(iod, size, rq->nr_phys_segments, | 443 | iod_init(iod, size, rq->nr_phys_segments, |
445 | (unsigned long) rq | 0x01); | 444 | (unsigned long) rq | NVME_INT_MASK); |
446 | return iod; | 445 | return iod; |
447 | } | 446 | } |
448 | 447 | ||