author		Matt Gates <matthew.gates@hp.com>	2012-05-01 12:43:11 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-05-10 04:17:26 -0400
commit		e16a33adc0e59aa96a483fd2923d77e674f013c1 (patch)
tree		832c2b44baa4dd48553d156b0340dfcc5bbee624 /drivers/scsi/hpsa.c
parent		254f796b9f22b1944c64caabc356a56caaa2facd (diff)
[SCSI] hpsa: refine interrupt handler locking for greater concurrency
Use spinlocks with finer granularity in the submission and completion paths
to allow concurrent execution for multiple reply queues. In particular, do
not hold a spin lock while submitting a request to the device, nor during
most of the interrupt handler.

Signed-off-by: Matt Gates <matthew.gates@hp.com>
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
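The essence of the change is easiest to see in isolation: hold the lock only
around shared queue and counter updates, and drop it across the slow hardware
submission so other reply queues can make progress. The following is a
minimal userspace sketch of that discipline, with a pthread mutex standing in
for the driver's spinlock; the pending count, outstanding counter, and
submit_to_hardware() are hypothetical stand-ins, not hpsa code (build with
cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;              /* stand-in for h->reqQ depth */
static int commands_outstanding;     /* stand-in for h->commands_outstanding */

/* Stand-in for h->access.submit_command(): potentially slow (an MMIO
 * doorbell write in the driver), so never call it with the lock held. */
static void submit_to_hardware(int cmd)
{
	printf("submitted command %d\n", cmd);
}

static void start_io(void)
{
	pthread_mutex_lock(&lock);
	while (pending > 0) {
		int cmd = pending--;
		/* Bump the counter before unlocking so a concurrent
		 * fifo-full check already sees this command in flight. */
		commands_outstanding++;
		pthread_mutex_unlock(&lock);
		submit_to_hardware(cmd);         /* lock not held here */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_io();
	printf("%d commands outstanding\n", commands_outstanding);
	return 0;
}

Note that the counter is incremented before the unlock, mirroring the comment
the patch adds to start_io(): a concurrent fifo-full check must already see
the command as in flight before the lock is released.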
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--	drivers/scsi/hpsa.c	61
1 file changed, 40 insertions, 21 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e4b27c449ec1..1834373d902f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -533,6 +533,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
 	u32 a;
 	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags;
 
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h, q);
@@ -540,7 +541,9 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
 		a = rq->head[rq->current_entry];
 		rq->current_entry++;
+		spin_lock_irqsave(&h->lock, flags);
 		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
 	} else {
 		a = FIFO_EMPTY;
 	}
@@ -575,8 +578,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
-	start_io(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	start_io(h);
 }
 
 static inline void removeQ(struct CommandList *c)
@@ -2091,9 +2094,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 		done(cmd);
 		return 0;
 	}
-	/* Need a lock as this is being allocated from the pool */
-	c = cmd_alloc(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	c = cmd_alloc(h);
 	if (c == NULL) {		/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2627,14 +2629,21 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	int i;
 	union u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
+	unsigned long flags;
 
+	spin_lock_irqsave(&h->lock, flags);
 	do {
 		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-		if (i == h->nr_cmds)
+		if (i == h->nr_cmds) {
+			spin_unlock_irqrestore(&h->lock, flags);
 			return NULL;
+		}
 	} while (test_and_set_bit
 		 (i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+	h->nr_allocs++;
+	spin_unlock_irqrestore(&h->lock, flags);
+
 	c = h->cmd_pool + i;
 	memset(c, 0, sizeof(*c));
 	cmd_dma_handle = h->cmd_pool_dhandle
@@ -2643,7 +2652,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	memset(c->err_info, 0, sizeof(*c->err_info));
 	err_dma_handle = h->errinfo_pool_dhandle
 	    + i * sizeof(*c->err_info);
-	h->nr_allocs++;
 
 	c->cmdindex = i;
 
@@ -2699,11 +2707,14 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
 	int i;
+	unsigned long flags;
 
 	i = c - h->cmd_pool;
+	spin_lock_irqsave(&h->lock, flags);
 	clear_bit(i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
 	h->nr_frees++;
+	spin_unlock_irqrestore(&h->lock, flags);
 }
 
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
@@ -3307,7 +3318,9 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
 static void start_io(struct ctlr_info *h)
 {
 	struct CommandList *c;
+	unsigned long flags;
 
+	spin_lock_irqsave(&h->lock, flags);
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
@@ -3320,12 +3333,23 @@ static void start_io(struct ctlr_info *h)
 		removeQ(c);
 		h->Qdepth--;
 
-		/* Tell the controller execute command */
-		h->access.submit_command(h, c);
-
 		/* Put job onto the completed Q */
 		addQ(&h->cmpQ, c);
+
+		/* Must increment commands_outstanding before unlocking
+		 * and submitting to avoid race checking for fifo full
+		 * condition.
+		 */
+		h->commands_outstanding++;
+		if (h->commands_outstanding > h->max_outstanding)
+			h->max_outstanding = h->commands_outstanding;
+
+		/* Tell the controller execute command */
+		spin_unlock_irqrestore(&h->lock, flags);
+		h->access.submit_command(h, c);
+		spin_lock_irqsave(&h->lock, flags);
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 }
 
 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
@@ -3356,7 +3380,11 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
 
 static inline void finish_cmd(struct CommandList *c)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->h->lock, flags);
 	removeQ(c);
+	spin_unlock_irqrestore(&c->h->lock, flags);
 	if (likely(c->cmd_type == CMD_SCSI))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
@@ -3403,14 +3431,18 @@ static inline void process_nonindexed_cmd(struct ctlr_info *h,
 {
 	u32 tag;
 	struct CommandList *c = NULL;
+	unsigned long flags;
 
 	tag = hpsa_tag_discard_error_bits(h, raw_tag);
+	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(c, &h->cmpQ, list) {
 		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
+			spin_unlock_irqrestore(&h->lock, flags);
 			finish_cmd(c);
 			return;
 		}
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 	bad_tag(h, h->nr_cmds + 1, raw_tag);
 }
 
@@ -3447,7 +3479,6 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
 {
 	struct ctlr_info *h = queue_to_hba(queue);
 	u8 q = *(u8 *) queue;
-	unsigned long flags;
 	u32 raw_tag;
 
 	if (ignore_bogus_interrupt(h))
@@ -3455,47 +3486,39 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h, q);
 		while (raw_tag != FIFO_EMPTY)
 			raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
 {
 	struct ctlr_info *h = queue_to_hba(queue);
-	unsigned long flags;
 	u32 raw_tag;
 	u8 q = *(u8 *) queue;
 
 	if (ignore_bogus_interrupt(h))
 		return IRQ_NONE;
 
-	spin_lock_irqsave(&h->lock, flags);
-
 	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY)
 		raw_tag = next_command(h, q);
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
 {
 	struct ctlr_info *h = queue_to_hba((u8 *) queue);
-	unsigned long flags;
 	u32 raw_tag;
 	u8 q = *(u8 *) queue;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
 		raw_tag = get_next_completion(h, q);
@@ -3507,18 +3530,15 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
 			raw_tag = next_command(h, q);
 		}
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
 {
 	struct ctlr_info *h = queue_to_hba(queue);
-	unsigned long flags;
 	u32 raw_tag;
 	u8 q = *(u8 *) queue;
 
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY) {
@@ -3528,7 +3548,6 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
 			process_nonindexed_cmd(h, raw_tag);
 		raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 