author    Matthew Wilcox <matthew.r.wilcox@intel.com>  2011-02-22 14:18:30 -0500
committer Matthew Wilcox <matthew.r.wilcox@intel.com>  2011-11-04 15:52:59 -0400
commit    00df5cb4eb927078850086f8becc3286a69ea12e (patch)
tree      891011b1387ead32520ce86e75032c11509b80ec
parent    c42705592be2a539f3027b6f3907de8e8f9591a8 (diff)
NVMe: Implement Flush

Linux implements Flush as a bit in the bio, which means there may also be
data associated with the flush; if so, the flush must be sent to the device
before the data. To avoid completing the bio twice, I add CMD_CTX_FLUSH to
indicate that the completion routine should do nothing.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
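
The rule the patch implements is easiest to see in isolation: a flush bit
with data attached becomes two commands (the flush first), while a bare
flush becomes a single command that completes the bio itself. Below is a
minimal userspace sketch of that dispatch rule only, not the kernel code;
struct fake_bio, the value of REQ_FLUSH, and submit() are all invented here
for illustration:

/* Userspace model of the flush dispatch, NOT kernel code: struct
 * fake_bio, REQ_FLUSH's value and submit() are all invented here. */
#include <stdio.h>
#include <stdbool.h>

#define REQ_FLUSH (1u << 0)             /* stand-in for the kernel flag */

struct fake_bio {
        unsigned int rw;                /* models bio->bi_rw */
        int phys_segments;              /* models bio_phys_segments() */
};

static void submit(const struct fake_bio *bio)
{
        bool flush = bio->rw & REQ_FLUSH;

        if (flush && bio->phys_segments) {
                /* Data attached: send a separate flush first, tagged
                 * CMD_CTX_FLUSH so its completion is a no-op; the data
                 * command completes the bio. */
                printf("flush cmd (CMD_CTX_FLUSH: completion does nothing)\n");
                printf("rw cmd for %d segment(s) (completes bio)\n",
                       bio->phys_segments);
        } else if (flush) {
                /* No data: the flush reuses the bio's cmdid, so the bio
                 * is completed exactly once, by the flush itself. */
                printf("flush cmd (completes bio)\n");
        } else {
                printf("rw cmd for %d segment(s) (completes bio)\n",
                       bio->phys_segments);
        }
}

int main(void)
{
        struct fake_bio flush_with_data = { REQ_FLUSH, 4 };
        struct fake_bio empty_flush = { REQ_FLUSH, 0 };

        submit(&flush_with_data);       /* two commands, one completion */
        submit(&empty_flush);           /* one command, one completion */
        return 0;
}

The key point is ownership of the bio's completion: with data, the extra
flush is fire-and-forget; without data, the flush inherits the bio's cmdid.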
 drivers/block/nvme.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index ce919b49b30d..d99b400ccd79 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -191,10 +191,12 @@ enum {
         bio_completion_id,
 };
 
+/* Special values must be a multiple of 4, and less than 0x1000 */
 #define CMD_CTX_BASE            (POISON_POINTER_DELTA + sync_completion_id)
 #define CMD_CTX_CANCELLED       (0x30C + CMD_CTX_BASE)
 #define CMD_CTX_COMPLETED       (0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID         (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH           (0x318 + CMD_CTX_BASE)
 
 static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
 {
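
The CMD_CTX_* constants in the hunk above stash out-of-band states in the
per-command context pointer itself: small 4-aligned offsets added to
POISON_POINTER_DELTA can never collide with a real pointer. A hedged sketch
of the same trick, with the poison base hardcoded for illustration (the
kernel takes it from <linux/poison.h>) and assuming a 64-bit build:

/* Sentinel-in-pointer sketch: offsets on a poison base give values no
 * real allocation can alias. FAKE_POISON_DELTA is invented here. */
#include <stdio.h>

#define FAKE_POISON_DELTA       0xdead000000000000UL
#define CTX_CANCELLED           (0x30C + FAKE_POISON_DELTA)
#define CTX_FLUSH               (0x318 + FAKE_POISON_DELTA)

static void complete(void *ctx)
{
        unsigned long v = (unsigned long)ctx;

        if (v == CTX_FLUSH)             /* flush sentinel: nothing to do */
                return;
        if (v == CTX_CANCELLED) {
                puts("cancelled: skip completion");
                return;
        }
        printf("real context at %p\n", ctx);    /* ordinary pointer */
}

int main(void)
{
        int real;

        complete(&real);                        /* real pointer path */
        complete((void *)CTX_FLUSH);            /* silently ignored */
        complete((void *)CTX_CANCELLED);        /* reported and skipped */
        return 0;
}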
@@ -416,6 +418,33 @@ static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
         return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
 }
 
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                int cmdid)
+{
+        struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+        memset(cmnd, 0, sizeof(*cmnd));
+        cmnd->common.opcode = nvme_cmd_flush;
+        cmnd->common.command_id = cmdid;
+        cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+        if (++nvmeq->sq_tail == nvmeq->q_depth)
+                nvmeq->sq_tail = 0;
+        writel(nvmeq->sq_tail, nvmeq->q_db);
+
+        return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+        int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+                                        sync_completion_id, IO_TIMEOUT);
+        if (unlikely(cmdid < 0))
+                return cmdid;
+
+        return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                                 struct bio *bio)
 {
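
nvme_submit_flush() above follows the driver's usual submission pattern:
fill the command at the current tail slot, advance the tail with wraparound
at q_depth, then write the new tail to the doorbell register. A toy model of
just the tail bookkeeping, with struct toy_queue invented and printf()
standing in for the writel() to the device:

/* Toy model of the SQ tail handling; the queue type is invented. */
#include <stdio.h>

struct toy_queue {
        unsigned int sq_tail;
        unsigned int q_depth;
};

static void ring_doorbell(struct toy_queue *q)
{
        if (++q->sq_tail == q->q_depth) /* wrap, exactly as the driver does */
                q->sq_tail = 0;
        printf("doorbell <- %u\n", q->sq_tail); /* models writel(tail, q_db) */
}

int main(void)
{
        struct toy_queue q = { .sq_tail = 2, .q_depth = 4 };

        ring_doorbell(&q);      /* -> 3 */
        ring_doorbell(&q);      /* wraps -> 0 */
        return 0;
}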
@@ -427,6 +456,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
         u32 dsmgmt;
         int psegs = bio_phys_segments(ns->queue, bio);
 
+        if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+                result = nvme_submit_flush_data(nvmeq, ns);
+                if (result)
+                        return result;
+        }
+
         nbio = alloc_nbio(psegs, GFP_ATOMIC);
         if (!nbio)
                 goto nomem;
@@ -437,6 +472,9 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
         if (unlikely(cmdid < 0))
                 goto free_nbio;
 
+        if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+                return nvme_submit_flush(nvmeq, ns, cmdid);
+
         control = 0;
         if (bio->bi_rw & REQ_FUA)
                 control |= NVME_RW_FUA;
@@ -520,6 +558,8 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
         struct sync_cmd_info *cmdinfo = ctx;
         if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
                 return;
+        if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
+                return;
         if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
                 dev_warn(nvmeq->q_dmadev,
                         "completed id %d twice on queue %d\n",