about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew.r.wilcox@intel.com>2011-01-20 13:24:06 -0500
committerMatthew Wilcox <matthew.r.wilcox@intel.com>2011-11-04 15:52:51 -0400
commit821234603b265f59d7eebce16d9e8beca2a5752d (patch)
tree6f145d86784163647a0fd3a93af1ca0c0519335c
parent1b23484bd012c078de2ea939249e2fb2e85a0a6e (diff)
NVMe: Rename 'cycle' to 'phase'
It's called the phase bit in the current draft.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
-rw-r--r--drivers/block/nvme.c16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index b6a213c98584..3d917a87ea93 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -93,7 +93,7 @@ struct nvme_queue {
 	u16 sq_head;
 	u16 sq_tail;
 	u16 cq_head;
-	u16 cq_cycle;
+	u16 cq_phase;
 	unsigned long cmdid_data[];
 };
 
@@ -364,7 +364,7 @@ typedef void (*completion_fn)(struct nvme_queue *, void *,
 
 static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 {
-	u16 head, cycle;
+	u16 head, phase;
 
 	static const completion_fn completions[4] = {
 		[sync_completion_id] = sync_completion,
@@ -372,19 +372,19 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 	};
 
 	head = nvmeq->cq_head;
-	cycle = nvmeq->cq_cycle;
+	phase = nvmeq->cq_phase;
 
 	for (;;) {
 		unsigned long data;
 		void *ptr;
 		unsigned char handler;
 		struct nvme_completion cqe = nvmeq->cqes[head];
-		if ((le16_to_cpu(cqe.status) & 1) != cycle)
+		if ((le16_to_cpu(cqe.status) & 1) != phase)
 			break;
 		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
 		if (++head == nvmeq->q_depth) {
 			head = 0;
-			cycle = !cycle;
+			phase = !phase;
 		}
 
 		data = free_cmdid(nvmeq, cqe.command_id);
@@ -399,12 +399,12 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 	 * requires that 0.1% of your interrupts are handled, so this isn't
 	 * a big problem.
 	 */
-	if (head == nvmeq->cq_head && cycle == nvmeq->cq_cycle)
+	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return IRQ_NONE;
 
 	writel(head, nvmeq->q_db + 1);
 	nvmeq->cq_head = head;
-	nvmeq->cq_cycle = cycle;
+	nvmeq->cq_phase = phase;
 
 	return IRQ_HANDLED;
 }
@@ -557,7 +557,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_dmadev = dmadev;
 	spin_lock_init(&nvmeq->q_lock);
 	nvmeq->cq_head = 0;
-	nvmeq->cq_cycle = 1;
+	nvmeq->cq_phase = 1;
 	init_waitqueue_head(&nvmeq->sq_full);
 	bio_list_init(&nvmeq->sq_cong);
 	nvmeq->q_db = &dev->dbs[qid * 2];