aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla4xxx/ql4_isr.c
diff options
context:
space:
mode:
authorVikas Chaudhary <vikas.chaudhary@qlogic.com>2010-07-28 06:23:44 -0400
committerJames Bottomley <James.Bottomley@suse.de>2010-07-28 10:04:23 -0400
commitf4f5df23bf72208d0c2f1d8be629839924c2f4c2 (patch)
tree88c41a002e0f9f4470543209047d1111a51a0d06 /drivers/scsi/qla4xxx/ql4_isr.c
parentdbaf82ece08bf93ae5200f03efd87c4f1fc453f1 (diff)
[SCSI] qla4xxx: Added support for ISP82XX
Signed-off-by: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
Signed-off-by: Karen Higgins <karen.higgins@qlogic.com>
Signed-off-by: Ravi Anand <ravi.anand@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/qla4xxx/ql4_isr.c')
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c388
1 files changed, 325 insertions, 63 deletions
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 596c3031483c..68d7942bf2e3 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -118,7 +118,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
118 118
119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); 119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
120 if (!srb) { 120 if (!srb) {
121 /* FIXMEdg: Don't we need to reset ISP in this case??? */
122 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid " 121 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
123 "handle 0x%x, sp=%p. This cmd may have already " 122 "handle 0x%x, sp=%p. This cmd may have already "
124 "been completed.\n", ha->host_no, __func__, 123 "been completed.\n", ha->host_no, __func__,
@@ -293,6 +292,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
293 292
294 case SCS_DEVICE_LOGGED_OUT: 293 case SCS_DEVICE_LOGGED_OUT:
295 case SCS_DEVICE_UNAVAILABLE: 294 case SCS_DEVICE_UNAVAILABLE:
295 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
296 "state: 0x%x\n", ha->host_no,
297 cmd->device->channel, cmd->device->id,
298 cmd->device->lun, sts_entry->completionStatus));
296 /* 299 /*
297 * Mark device missing so that we won't continue to 300 * Mark device missing so that we won't continue to
298 * send I/O to this device. We should get a ddb 301 * send I/O to this device. We should get a ddb
@@ -339,16 +342,14 @@ status_entry_exit:
339 * This routine process response queue completions in interrupt context. 342 * This routine process response queue completions in interrupt context.
340 * Hardware_lock locked upon entry 343 * Hardware_lock locked upon entry
341 **/ 344 **/
342static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) 345void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
343{ 346{
344 uint32_t count = 0; 347 uint32_t count = 0;
345 struct srb *srb = NULL; 348 struct srb *srb = NULL;
346 struct status_entry *sts_entry; 349 struct status_entry *sts_entry;
347 350
348 /* Process all responses from response queue */ 351 /* Process all responses from response queue */
349 while ((ha->response_in = 352 while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
350 (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
351 ha->response_out) {
352 sts_entry = (struct status_entry *) ha->response_ptr; 353 sts_entry = (struct status_entry *) ha->response_ptr;
353 count++; 354 count++;
354 355
@@ -413,14 +414,14 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
413 sts_entry->hdr.entryType)); 414 sts_entry->hdr.entryType));
414 goto exit_prq_error; 415 goto exit_prq_error;
415 } 416 }
417 ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
418 wmb();
416 } 419 }
417 420
418 /* 421 /*
419 * Done with responses, update the ISP For QLA4010, this also clears 422 * Tell ISP we're done with response(s). This also clears the interrupt.
420 * the interrupt.
421 */ 423 */
422 writel(ha->response_out, &ha->reg->rsp_q_out); 424 ha->isp_ops->complete_iocb(ha);
423 readl(&ha->reg->rsp_q_out);
424 425
425 return; 426 return;
426 427
@@ -430,9 +431,7 @@ exit_prq_invalid_handle:
430 sts_entry->completionStatus)); 431 sts_entry->completionStatus));
431 432
432exit_prq_error: 433exit_prq_error:
433 writel(ha->response_out, &ha->reg->rsp_q_out); 434 ha->isp_ops->complete_iocb(ha);
434 readl(&ha->reg->rsp_q_out);
435
436 set_bit(DPC_RESET_HA, &ha->dpc_flags); 435 set_bit(DPC_RESET_HA, &ha->dpc_flags);
437} 436}
438 437
@@ -448,7 +447,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
448 uint32_t mbox_status) 447 uint32_t mbox_status)
449{ 448{
450 int i; 449 int i;
451 uint32_t mbox_stat2, mbox_stat3; 450 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
452 451
453 if ((mbox_status == MBOX_STS_BUSY) || 452 if ((mbox_status == MBOX_STS_BUSY) ||
454 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 453 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -460,27 +459,37 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
460 * Copy all mailbox registers to a temporary 459 * Copy all mailbox registers to a temporary
461 * location and set mailbox command done flag 460 * location and set mailbox command done flag
462 */ 461 */
463 for (i = 1; i < ha->mbox_status_count; i++) 462 for (i = 0; i < ha->mbox_status_count; i++)
464 ha->mbox_status[i] = 463 ha->mbox_status[i] = is_qla8022(ha)
465 readl(&ha->reg->mailbox[i]); 464 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
465 : readl(&ha->reg->mailbox[i]);
466 466
467 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 467 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
468
469 if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
470 complete(&ha->mbx_intr_comp);
468 } 471 }
469 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 472 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
473 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
474 mbox_sts[i] = is_qla8022(ha)
475 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
476 : readl(&ha->reg->mailbox[i]);
477
470 /* Immediately process the AENs that don't require much work. 478 /* Immediately process the AENs that don't require much work.
471 * Only queue the database_changed AENs */ 479 * Only queue the database_changed AENs */
472 if (ha->aen_log.count < MAX_AEN_ENTRIES) { 480 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
473 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 481 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
474 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] = 482 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
475 readl(&ha->reg->mailbox[i]); 483 mbox_sts[i];
476 ha->aen_log.count++; 484 ha->aen_log.count++;
477 } 485 }
478 switch (mbox_status) { 486 switch (mbox_status) {
479 case MBOX_ASTS_SYSTEM_ERROR: 487 case MBOX_ASTS_SYSTEM_ERROR:
480 /* Log Mailbox registers */ 488 /* Log Mailbox registers */
489 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
481 if (ql4xdontresethba) { 490 if (ql4xdontresethba) {
482 DEBUG2(printk("%s:Dont Reset HBA\n", 491 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
483 __func__)); 492 ha->host_no, __func__));
484 } else { 493 } else {
485 set_bit(AF_GET_CRASH_RECORD, &ha->flags); 494 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
486 set_bit(DPC_RESET_HA, &ha->dpc_flags); 495 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -502,18 +511,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
502 if (test_bit(AF_INIT_DONE, &ha->flags)) 511 if (test_bit(AF_INIT_DONE, &ha->flags))
503 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 512 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
504 513
505 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter" 514 ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
506 " LINK UP\n", ha->host_no,
507 mbox_status));
508 break; 515 break;
509 516
510 case MBOX_ASTS_LINK_DOWN: 517 case MBOX_ASTS_LINK_DOWN:
511 clear_bit(AF_LINK_UP, &ha->flags); 518 clear_bit(AF_LINK_UP, &ha->flags);
512 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 519 if (test_bit(AF_INIT_DONE, &ha->flags))
520 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
513 521
514 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter" 522 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
515 " LINK DOWN\n", ha->host_no,
516 mbox_status));
517 break; 523 break;
518 524
519 case MBOX_ASTS_HEARTBEAT: 525 case MBOX_ASTS_HEARTBEAT:
@@ -539,12 +545,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
539 break; 545 break;
540 546
541 case MBOX_ASTS_IP_ADDR_STATE_CHANGED: 547 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
542 mbox_stat2 = readl(&ha->reg->mailbox[2]); 548 printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
543 mbox_stat3 = readl(&ha->reg->mailbox[3]); 549 "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
544 550 mbox_sts[2], mbox_sts[3]);
545 if ((mbox_stat3 == 5) && (mbox_stat2 == 3)) 551
552 /* mbox_sts[2] = Old ACB state
553 * mbox_sts[3] = new ACB state */
554 if ((mbox_sts[3] == ACB_STATE_VALID) &&
555 (mbox_sts[2] == ACB_STATE_TENTATIVE))
546 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 556 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
547 else if ((mbox_stat3 == 2) && (mbox_stat2 == 5)) 557 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
558 (mbox_sts[2] == ACB_STATE_VALID))
548 set_bit(DPC_RESET_HA, &ha->dpc_flags); 559 set_bit(DPC_RESET_HA, &ha->dpc_flags);
549 break; 560 break;
550 561
@@ -553,9 +564,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
553 /* No action */ 564 /* No action */
554 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, " 565 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
555 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n", 566 "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
556 ha->host_no, mbox_status, 567 ha->host_no, mbox_sts[0],
557 readl(&ha->reg->mailbox[1]), 568 mbox_sts[1], mbox_sts[2]));
558 readl(&ha->reg->mailbox[2])));
559 break; 569 break;
560 570
561 case MBOX_ASTS_SELF_TEST_FAILED: 571 case MBOX_ASTS_SELF_TEST_FAILED:
@@ -563,10 +573,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
563 /* No action */ 573 /* No action */
564 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, " 574 DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
565 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", 575 "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
566 ha->host_no, mbox_status, 576 ha->host_no, mbox_sts[0], mbox_sts[1],
567 readl(&ha->reg->mailbox[1]), 577 mbox_sts[2], mbox_sts[3]));
568 readl(&ha->reg->mailbox[2]),
569 readl(&ha->reg->mailbox[3])));
570 break; 578 break;
571 579
572 case MBOX_ASTS_DATABASE_CHANGED: 580 case MBOX_ASTS_DATABASE_CHANGED:
@@ -577,21 +585,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
577 /* decrement available counter */ 585 /* decrement available counter */
578 ha->aen_q_count--; 586 ha->aen_q_count--;
579 587
580 for (i = 1; i < MBOX_AEN_REG_COUNT; i++) 588 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
581 ha->aen_q[ha->aen_in].mbox_sts[i] = 589 ha->aen_q[ha->aen_in].mbox_sts[i] =
582 readl(&ha->reg->mailbox[i]); 590 mbox_sts[i];
583
584 ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
585 591
586 /* print debug message */ 592 /* print debug message */
587 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued" 593 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
588 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n", 594 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
589 ha->host_no, ha->aen_in, 595 ha->host_no, ha->aen_in, mbox_sts[0],
590 mbox_status, 596 mbox_sts[1], mbox_sts[2], mbox_sts[3],
591 ha->aen_q[ha->aen_in].mbox_sts[1], 597 mbox_sts[4]));
592 ha->aen_q[ha->aen_in].mbox_sts[2], 598
593 ha->aen_q[ha->aen_in].mbox_sts[3],
594 ha->aen_q[ha->aen_in]. mbox_sts[4]));
595 /* advance pointer */ 599 /* advance pointer */
596 ha->aen_in++; 600 ha->aen_in++;
597 if (ha->aen_in == MAX_AEN_ENTRIES) 601 if (ha->aen_in == MAX_AEN_ENTRIES)
@@ -603,18 +607,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
603 DEBUG2(printk("scsi%ld: %s: aen %04x, queue " 607 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
604 "overflowed! AEN LOST!!\n", 608 "overflowed! AEN LOST!!\n",
605 ha->host_no, __func__, 609 ha->host_no, __func__,
606 mbox_status)); 610 mbox_sts[0]));
607 611
608 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n", 612 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
609 ha->host_no)); 613 ha->host_no));
610 614
611 for (i = 0; i < MAX_AEN_ENTRIES; i++) { 615 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
612 DEBUG2(printk("AEN[%d] %04x %04x %04x " 616 DEBUG2(printk("AEN[%d] %04x %04x %04x "
613 "%04x\n", i, 617 "%04x\n", i, mbox_sts[0],
614 ha->aen_q[i].mbox_sts[0], 618 mbox_sts[1], mbox_sts[2],
615 ha->aen_q[i].mbox_sts[1], 619 mbox_sts[3]));
616 ha->aen_q[i].mbox_sts[2],
617 ha->aen_q[i].mbox_sts[3]));
618 } 620 }
619 } 621 }
620 break; 622 break;
@@ -622,7 +624,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
622 default: 624 default:
623 DEBUG2(printk(KERN_WARNING 625 DEBUG2(printk(KERN_WARNING
624 "scsi%ld: AEN %04x UNKNOWN\n", 626 "scsi%ld: AEN %04x UNKNOWN\n",
625 ha->host_no, mbox_status)); 627 ha->host_no, mbox_sts[0]));
626 break; 628 break;
627 } 629 }
628 } else { 630 } else {
@@ -634,6 +636,30 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
634} 636}
635 637
636/** 638/**
639 * qla4_8xxx_interrupt_service_routine - isr
640 * @ha: pointer to host adapter structure.
641 *
642 * This is the main interrupt service routine.
643 * hardware_lock locked upon entry. runs in interrupt context.
644 **/
645void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
646 uint32_t intr_status)
647{
648 /* Process response queue interrupt. */
649 if (intr_status & HSRX_RISC_IOCB_INT)
650 qla4xxx_process_response_queue(ha);
651
652 /* Process mailbox/asynch event interrupt.*/
653 if (intr_status & HSRX_RISC_MB_INT)
654 qla4xxx_isr_decode_mailbox(ha,
655 readl(&ha->qla4_8xxx_reg->mailbox_out[0]));
656
657 /* clear the interrupt */
658 writel(0, &ha->qla4_8xxx_reg->host_int);
659 readl(&ha->qla4_8xxx_reg->host_int);
660}
661
662/**
637 * qla4xxx_interrupt_service_routine - isr 663 * qla4xxx_interrupt_service_routine - isr
638 * @ha: pointer to host adapter structure. 664 * @ha: pointer to host adapter structure.
639 * 665 *
@@ -660,6 +686,28 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
660} 686}
661 687
662/** 688/**
689 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
690 * @ha: pointer to host adapter structure.
691 * @reqs_count: .
692 *
693 **/
694static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
695 uint8_t reqs_count)
696{
697 if (reqs_count)
698 return;
699
700 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
701 if (is_qla8022(ha)) {
702 writel(0, &ha->qla4_8xxx_reg->host_int);
703 if (test_bit(AF_INTx_ENABLED, &ha->flags))
704 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
705 0xfbff);
706 }
707 ha->spurious_int_count++;
708}
709
710/**
663 * qla4xxx_intr_handler - hardware interrupt handler. 711 * qla4xxx_intr_handler - hardware interrupt handler.
664 * @irq: Unused 712 * @irq: Unused
665 * @dev_id: Pointer to host adapter structure 713 * @dev_id: Pointer to host adapter structure
@@ -689,15 +737,14 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
689 /* 737 /*
690 * Read interrupt status 738 * Read interrupt status
691 */ 739 */
692 if (le32_to_cpu(ha->shadow_regs->rsp_q_in) != 740 if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
693 ha->response_out) 741 ha->response_out)
694 intr_status = CSR_SCSI_COMPLETION_INTR; 742 intr_status = CSR_SCSI_COMPLETION_INTR;
695 else 743 else
696 intr_status = readl(&ha->reg->ctrl_status); 744 intr_status = readl(&ha->reg->ctrl_status);
697 745
698 if ((intr_status & 746 if ((intr_status &
699 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 747 (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
700 0) {
701 if (reqs_count == 0) 748 if (reqs_count == 0)
702 ha->spurious_int_count++; 749 ha->spurious_int_count++;
703 break; 750 break;
@@ -739,22 +786,159 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
739 &ha->reg->ctrl_status); 786 &ha->reg->ctrl_status);
740 readl(&ha->reg->ctrl_status); 787 readl(&ha->reg->ctrl_status);
741 788
742 if (!ql4_mod_unload) 789 if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
743 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 790 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
744 791
745 break; 792 break;
746 } else if (intr_status & INTR_PENDING) { 793 } else if (intr_status & INTR_PENDING) {
747 qla4xxx_interrupt_service_routine(ha, intr_status); 794 ha->isp_ops->interrupt_service_routine(ha, intr_status);
748 ha->total_io_count++; 795 ha->total_io_count++;
749 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 796 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
750 break; 797 break;
798 }
799 }
800
801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802
803 return IRQ_HANDLED;
804}
805
806/**
807 * qla4_8xxx_intr_handler - hardware interrupt handler.
808 * @irq: Unused
809 * @dev_id: Pointer to host adapter structure
810 **/
811irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
812{
813 struct scsi_qla_host *ha = dev_id;
814 uint32_t intr_status;
815 uint32_t status;
816 unsigned long flags = 0;
817 uint8_t reqs_count = 0;
818
819 ha->isr_count++;
820 status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
821 if (!(status & ha->nx_legacy_intr.int_vec_bit))
822 return IRQ_NONE;
823
824 status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
825 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
826 DEBUG2(ql4_printk(KERN_INFO, ha,
827 "%s legacy Int not triggered\n", __func__));
828 return IRQ_NONE;
829 }
830
831 /* clear the interrupt */
832 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
833
834 /* read twice to ensure write is flushed */
835 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
836 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
837
838 spin_lock_irqsave(&ha->hardware_lock, flags);
839 while (1) {
840 if (!(readl(&ha->qla4_8xxx_reg->host_int) &
841 ISRX_82XX_RISC_INT)) {
842 qla4_8xxx_spurious_interrupt(ha, reqs_count);
843 break;
844 }
845 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
846 if ((intr_status &
847 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
848 qla4_8xxx_spurious_interrupt(ha, reqs_count);
849 break;
850 }
851
852 ha->isp_ops->interrupt_service_routine(ha, intr_status);
853
854 /* Enable Interrupt */
855 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
751 856
752 intr_status = 0; 857 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
858 break;
859 }
860
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return IRQ_HANDLED;
863}
864
865irqreturn_t
866qla4_8xxx_msi_handler(int irq, void *dev_id)
867{
868 struct scsi_qla_host *ha;
869
870 ha = (struct scsi_qla_host *) dev_id;
871 if (!ha) {
872 DEBUG2(printk(KERN_INFO
873 "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
874 return IRQ_NONE;
875 }
876
877 ha->isr_count++;
878 /* clear the interrupt */
879 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
880
881 /* read twice to ensure write is flushed */
882 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
883 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
884
885 return qla4_8xxx_default_intr_handler(irq, dev_id);
886}
887
888/**
889 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
890 * @irq: Unused
891 * @dev_id: Pointer to host adapter structure
892 *
893 * This interrupt handler is called directly for MSI-X, and
894 * called indirectly for MSI.
895 **/
896irqreturn_t
897qla4_8xxx_default_intr_handler(int irq, void *dev_id)
898{
899 struct scsi_qla_host *ha = dev_id;
900 unsigned long flags;
901 uint32_t intr_status;
902 uint8_t reqs_count = 0;
903
904 spin_lock_irqsave(&ha->hardware_lock, flags);
905 while (1) {
906 if (!(readl(&ha->qla4_8xxx_reg->host_int) &
907 ISRX_82XX_RISC_INT)) {
908 qla4_8xxx_spurious_interrupt(ha, reqs_count);
909 break;
910 }
911
912 intr_status = readl(&ha->qla4_8xxx_reg->host_status);
913 if ((intr_status &
914 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
915 qla4_8xxx_spurious_interrupt(ha, reqs_count);
916 break;
753 } 917 }
918
919 ha->isp_ops->interrupt_service_routine(ha, intr_status);
920
921 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
922 break;
754 } 923 }
755 924
925 ha->isr_count++;
756 spin_unlock_irqrestore(&ha->hardware_lock, flags); 926 spin_unlock_irqrestore(&ha->hardware_lock, flags);
927 return IRQ_HANDLED;
928}
757 929
930irqreturn_t
931qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
932{
933 struct scsi_qla_host *ha = dev_id;
934 unsigned long flags;
935
936 spin_lock_irqsave(&ha->hardware_lock, flags);
937 qla4xxx_process_response_queue(ha);
938 writel(0, &ha->qla4_8xxx_reg->host_int);
939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
940
941 ha->isr_count++;
758 return IRQ_HANDLED; 942 return IRQ_HANDLED;
759} 943}
760 944
@@ -825,7 +1009,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
825 ((ddb_entry->default_time2wait + 1009 ((ddb_entry->default_time2wait +
826 4) * HZ); 1010 4) * HZ);
827 1011
828 DEBUG2(printk("scsi%ld: ddb index [%d] initate" 1012 DEBUG2(printk("scsi%ld: ddb [%d] initate"
829 " RELOGIN after %d seconds\n", 1013 " RELOGIN after %d seconds\n",
830 ha->host_no, 1014 ha->host_no,
831 ddb_entry->fw_ddb_index, 1015 ddb_entry->fw_ddb_index,
@@ -847,3 +1031,81 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1031 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848} 1032}
849 1033
1034int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1035{
1036 int ret;
1037
1038 if (!is_qla8022(ha))
1039 goto try_intx;
1040
1041 if (ql4xenablemsix == 2)
1042 goto try_msi;
1043
1044 if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
1045 goto try_intx;
1046
1047 /* Trying MSI-X */
1048 ret = qla4_8xxx_enable_msix(ha);
1049 if (!ret) {
1050 DEBUG2(ql4_printk(KERN_INFO, ha,
1051 "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1052 goto irq_attached;
1053 }
1054
1055 ql4_printk(KERN_WARNING, ha,
1056 "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
1057
1058try_msi:
1059 /* Trying MSI */
1060 ret = pci_enable_msi(ha->pdev);
1061 if (!ret) {
1062 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1063 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
1064 if (!ret) {
1065 DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1066 set_bit(AF_MSI_ENABLED, &ha->flags);
1067 goto irq_attached;
1068 } else {
1069 ql4_printk(KERN_WARNING, ha,
1070 "MSI: Failed to reserve interrupt %d "
1071 "already in use.\n", ha->pdev->irq);
1072 pci_disable_msi(ha->pdev);
1073 }
1074 }
1075 ql4_printk(KERN_WARNING, ha,
1076 "MSI: Falling back-to INTx mode -- %d.\n", ret);
1077
1078try_intx:
1079 /* Trying INTx */
1080 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1081 IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
1082 if (!ret) {
1083 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1084 set_bit(AF_INTx_ENABLED, &ha->flags);
1085 goto irq_attached;
1086
1087 } else {
1088 ql4_printk(KERN_WARNING, ha,
1089 "INTx: Failed to reserve interrupt %d already in"
1090 " use.\n", ha->pdev->irq);
1091 return ret;
1092 }
1093
1094irq_attached:
1095 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1096 ha->host->irq = ha->pdev->irq;
1097 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1098 __func__, ha->pdev->irq);
1099 return ret;
1100}
1101
1102void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1103{
1104 if (test_bit(AF_MSIX_ENABLED, &ha->flags))
1105 qla4_8xxx_disable_msix(ha);
1106 else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
1107 free_irq(ha->pdev->irq, ha);
1108 pci_disable_msi(ha->pdev);
1109 } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
1110 free_irq(ha->pdev->irq, ha);
1111}