author    Matthew Wilcox <matthew.r.wilcox@intel.com>  2011-12-20 11:54:53 -0500
committer Matthew Wilcox <matthew.r.wilcox@intel.com>  2012-01-10 14:51:00 -0500
commit    5c1281a3bf5655ec1b90db495da3a2b77826ba88 (patch)
tree      6b8b76416b61c356cd9e6cccc825739e473cb69d /drivers/block/nvme.c
parent    040a93b52a9eee8177ebaf2ba0ee0f9f518d1bf8 (diff)
NVMe: Change nvme_completion_fn to take a dev
The queue is only needed for some rare occasions, and it's more consistent
to pass the device around.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
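As a reader aid, a minimal compilable sketch of the new callback shape follows. Only the nvme_completion_fn typedef matches the patch; the stand-in structs, example_completion() and main() are hypothetical scaffolding, not the driver's real definitions:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's types, just enough to compile. */
struct nvme_dev        { const char *name; };
struct nvme_queue      { struct nvme_dev *dev; int qid; };
struct nvme_completion { unsigned short command_id; };

/* The typedef as changed by this patch: completion callbacks receive
 * the device, not the queue the command happened to complete on. */
typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
				   struct nvme_completion *);

/* Hypothetical callback: everything it needs hangs off the device. */
static void example_completion(struct nvme_dev *dev, void *ctx,
			       struct nvme_completion *cqe)
{
	(void)ctx;
	printf("%s: completed command id %u\n", dev->name,
	       (unsigned)cqe->command_id);
}

int main(void)
{
	struct nvme_dev dev = { "nvme0" };
	struct nvme_queue nvmeq = { &dev, 1 };
	struct nvme_completion cqe = { 42 };
	nvme_completion_fn fn = example_completion;

	/* Call sites that hold only a queue pass nvmeq->dev, mirroring
	 * fn(nvmeq->dev, ctx, &cqe) in the hunks below. */
	fn(nvmeq.dev, NULL, &cqe);
	return 0;
}

The payoff is that queue-scoped state no longer has to be threaded through every completion; it is looked up only on the rare path that needs it (see requeue_bio() in the diff below).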
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	43	+++++++++++++++++++++++++------------------
1 file changed, 25 insertions(+), 18 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index aa2fd66aabd6..b0e8a6dd33b1 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -135,7 +135,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
 						struct nvme_completion *);
 
 struct nvme_cmd_info {
@@ -199,7 +199,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
 #define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
 
-static void special_completion(struct nvme_queue *nvmeq, void *ctx,
+static void special_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
 {
 	if (ctx == CMD_CTX_CANCELLED)
@@ -207,19 +207,19 @@ static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 	if (ctx == CMD_CTX_FLUSH)
 		return;
 	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(nvmeq->q_dmadev,
+		dev_warn(&dev->pci_dev->dev,
 				"completed id %d twice on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
 	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(nvmeq->q_dmadev,
+		dev_warn(&dev->pci_dev->dev,
 				"invalid id %d completed on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
 
-	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
+	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
 }
 
 /*
@@ -332,29 +332,36 @@ static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
 				sizeof(struct scatterlist) * nseg, gfp);
 }
 
-static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
+static void free_nbio(struct nvme_dev *dev, struct nvme_bio *nbio)
 {
-	nvme_free_prps(nvmeq->dev, nbio->prps);
+	nvme_free_prps(dev, nbio->prps);
 	kfree(nbio);
 }
 
-static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+	struct nvme_queue *nvmeq = get_nvmeq(dev);
+	if (bio_list_empty(&nvmeq->sq_cong))
+		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+	bio_list_add(&nvmeq->sq_cong, bio);
+	put_nvmeq(nvmeq);
+	wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct nvme_bio *nbio = ctx;
 	struct bio *bio = nbio->bio;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
-	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
+	dma_unmap_sg(&dev->pci_dev->dev, nbio->sg, nbio->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	free_nbio(nvmeq, nbio);
+	free_nbio(dev, nbio);
 	if (status) {
 		bio_endio(bio, -EIO);
 	} else if (bio->bi_vcnt > bio->bi_idx) {
-		if (bio_list_empty(&nvmeq->sq_cong))
-			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-		bio_list_add(&nvmeq->sq_cong, bio);
-		wake_up_process(nvme_thread);
+		requeue_bio(dev, bio);
 	} else {
 		bio_endio(bio, 0);
 	}
@@ -594,7 +601,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	return 0;
 
  free_nbio:
-	free_nbio(nvmeq, nbio);
+	free_nbio(nvmeq->dev, nbio);
  nomem:
 	return result;
 }
@@ -644,7 +651,7 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 		}
 
 		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq, ctx, &cqe);
+		fn(nvmeq->dev, ctx, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -695,7 +702,7 @@ struct sync_cmd_info {
 	int status;
 };
 
-static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
+static void sync_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct sync_cmd_info *cmdinfo = ctx;
@@ -1207,7 +1214,7 @@ static void nvme_timeout_ios(struct nvme_queue *nvmeq)
 			continue;
 		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq, ctx, &cqe);
+		fn(nvmeq->dev, ctx, &cqe);
 	}
 }
 
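The one completion-side path that still needs a queue is requeueing a partially-completed bio, which the patch factors into requeue_bio(): it re-acquires a queue from the device and releases it when done. A rough compilable sketch of that acquire/use/release shape, with hypothetical stand-ins (the real get_nvmeq()/put_nvmeq() pin and release a per-CPU queue):

#include <stdio.h>

struct nvme_queue;
struct nvme_dev   { struct nvme_queue *queue; };
struct nvme_queue { int cong_entries; };

/* Hypothetical stand-ins for the driver's queue get/put helpers. */
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) { return dev->queue; }
static void put_nvmeq(struct nvme_queue *nvmeq) { (void)nvmeq; }

/* Shape of requeue_bio(): look a queue up only when requeueing,
 * park the work on its congestion list, then let the queue go. */
static void requeue_work(struct nvme_dev *dev)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);

	nvmeq->cong_entries++;	/* stands in for bio_list_add() + wakeup */
	put_nvmeq(nvmeq);
}

int main(void)
{
	struct nvme_queue q = { 0 };
	struct nvme_dev dev = { &q };

	requeue_work(&dev);
	printf("congestion entries: %d\n", q.cong_entries);
	return 0;
}

Keeping the lookup inside the helper means the common completion path pays nothing for it.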