author     Eilon Greenstein <eilong@broadcom.com>    2009-02-12 03:36:15 -0500
committer  David S. Miller <davem@davemloft.net>     2009-02-16 02:31:05 -0500
commit     8badd27aa0d7c02572fcd1a4a3c6b57d67f40b78
tree       1a207ceefdd4dd4269a1449a47356e43f96d5d81  /drivers/net/bnx2x_main.c
parent     555f6c78373f969f14487253abe331d085449360
bnx2x: MSI support
Enable MSI in addition to the existing MSI-X and INT#A support, and change the
module parameter so that INT#A or MSI can be chosen even when MSI-X is
available. The default status block should not be byte-swapped for endianness.
Since MSI can trigger a re-configuration, the interrupt disable function now
requires an mmiowb().
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
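
For context on the commit message above: the patch establishes a fallback order of MSI-X (one vector per fastpath queue plus one for the slowpath), then a single MSI vector, then legacy INT#A. Below is a minimal, hypothetical sketch of that ordering using the same 2.6-era PCI/IRQ APIs the patch calls; the `my_dev` structure and `my_setup_interrupts()` function are illustrative only and do not exist in bnx2x, where the equivalent logic is spread across bnx2x_set_int_mode(), bnx2x_enable_msix(), bnx2x_enable_msi() and bnx2x_req_irq().

```c
#include <linux/pci.h>
#include <linux/interrupt.h>

#define MY_USING_MSIX	0x1
#define MY_USING_MSI	0x2

struct my_dev {				/* illustrative only, not a bnx2x type */
	struct pci_dev *pdev;
	struct msix_entry msix_table[8];	/* slowpath + fastpath queues */
	unsigned int flags;
};

static int my_setup_interrupts(struct my_dev *d, int nvecs,
			       irq_handler_t handler)
{
	int i;

	for (i = 0; i < nvecs; i++)
		d->msix_table[i].entry = i;

	/* First choice: one MSI-X vector per queue plus one for the slowpath */
	if (!pci_enable_msix(d->pdev, d->msix_table, nvecs)) {
		d->flags |= MY_USING_MSIX;
		/* request_irq() on each msix_table[i].vector omitted here */
		return 0;
	}

	/* Fall back to a single MSI vector, then to legacy INTx */
	if (!pci_enable_msi(d->pdev))
		d->flags |= MY_USING_MSI;

	/* MSI is never shared, so IRQF_SHARED is only needed for INTx */
	return request_irq(d->pdev->irq, handler,
			   (d->flags & MY_USING_MSI) ? 0 : IRQF_SHARED,
			   "my_dev", d);
}
```

Note how the MSI path passes 0 rather than IRQF_SHARED to request_irq(); that is the same distinction bnx2x_req_irq() gains in this patch, and it is also why bnx2x_free_irq() must call pci_disable_msi() after freeing the vector.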
Diffstat (limited to 'drivers/net/bnx2x_main.c')

 -rw-r--r--  drivers/net/bnx2x_main.c | 155
 1 file changed, 108 insertions, 47 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 60762f769efc..db8506d08fa5 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -77,17 +77,19 @@ static int multi_mode = 1;
 module_param(multi_mode, int, 0);
 
 static int disable_tpa;
-static int use_inta;
 static int poll;
 static int debug;
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
 
 module_param(disable_tpa, int, 0);
-module_param(use_inta, int, 0);
+
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+
 module_param(poll, int, 0);
 module_param(debug, int, 0);
 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
-MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
 MODULE_PARM_DESC(poll, "use polling (for debug)");
 MODULE_PARM_DESC(debug, "default debug msglevel");
 
@@ -588,34 +590,41 @@ static void bnx2x_int_enable(struct bnx2x *bp)
 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 	u32 val = REG_RD(bp, addr);
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 
 	if (msix) {
-		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0);
 		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else if (msi) {
+		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 	} else {
 		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 			HC_CONFIG_0_REG_INT_LINE_EN_0 |
 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
-		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
-		   val, port, addr, msix);
+		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+		   val, port, addr);
 
 		REG_WR(bp, addr, val);
 
 		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
 	}
 
-	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
-	   val, port, addr, msix);
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
+	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
 	REG_WR(bp, addr, val);
 
 	if (CHIP_IS_E1H(bp)) {
 		/* init leading/trailing edge */
 		if (IS_E1HMF(bp)) {
-			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
+			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
 			if (bp->port.pmf)
 				/* enable nig attention */
 				val |= 0x0100;
@@ -641,6 +650,9 @@ static void bnx2x_int_disable(struct bnx2x *bp)
 	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 	   val, port, addr);
 
+	/* flush all outstanding writes */
+	mmiowb();
+
 	REG_WR(bp, addr, val);
 	if (REG_RD(bp, addr) != val)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
@@ -649,7 +661,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 {
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int i;
+	int i, offset;
 
 	/* disable interrupt handling */
 	atomic_inc(&bp->intr_sem);
@@ -659,11 +671,10 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 
 	/* make sure all ISRs are done */
 	if (msix) {
+		synchronize_irq(bp->msix_table[0].vector);
+		offset = 1;
 		for_each_queue(bp, i)
-			synchronize_irq(bp->msix_table[i].vector);
-
-		/* one more for the Slow Path IRQ */
-		synchronize_irq(bp->msix_table[i].vector);
+			synchronize_irq(bp->msix_table[i + offset].vector);
 	} else
 		synchronize_irq(bp->pdev->irq);
 
@@ -5198,6 +5209,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+	/* make sure this value is 0 */
+	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
 
 	/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
 	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
@@ -5648,10 +5661,17 @@ static int bnx2x_init_func(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
+	u32 addr, val;
 	int i;
 
 	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
 
+	/* set MSI reconfigure capability */
+	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+	val = REG_RD(bp, addr);
+	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+	REG_WR(bp, addr, val);
+
 	i = FUNC_ILT_BASE(func);
 
 	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
@@ -6053,10 +6073,6 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 			   "state %x\n", i, bp->msix_table[i + offset].vector,
 			   bnx2x_fp(bp, i, state));
 
-		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
-			BNX2X_ERR("IRQ of fp #%d being freed while "
-				  "state != closed\n", i);
-
 		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
 	}
 }
@@ -6068,21 +6084,25 @@ static void bnx2x_free_irq(struct bnx2x *bp)
 		pci_disable_msix(bp->pdev);
 		bp->flags &= ~USING_MSIX_FLAG;
 
+	} else if (bp->flags & USING_MSI_FLAG) {
+		free_irq(bp->pdev->irq, bp->dev);
+		pci_disable_msi(bp->pdev);
+		bp->flags &= ~USING_MSI_FLAG;
+
 	} else
 		free_irq(bp->pdev->irq, bp->dev);
 }
 
 static int bnx2x_enable_msix(struct bnx2x *bp)
 {
-	int i, rc, offset;
+	int i, rc, offset = 1;
+	int igu_vec = 0;
 
-	bp->msix_table[0].entry = 0;
-	offset = 1;
-	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
+	bp->msix_table[0].entry = igu_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
 
 	for_each_queue(bp, i) {
-		int igu_vec = offset + i + BP_L_ID(bp);
-
+		igu_vec = BP_L_ID(bp) + offset + i;
 		bp->msix_table[i + offset].entry = igu_vec;
 		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
 		   "(fastpath #%u)\n", i + offset, igu_vec, i);
@@ -6091,9 +6111,10 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
 			     BNX2X_NUM_QUEUES(bp) + offset);
 	if (rc) {
-		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
-		return -1;
+		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+		return rc;
 	}
+
 	bp->flags |= USING_MSIX_FLAG;
 
 	return 0;
@@ -6140,11 +6161,31 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 	return 0;
 }
 
+static int bnx2x_enable_msi(struct bnx2x *bp)
+{
+	int rc;
+
+	rc = pci_enable_msi(bp->pdev);
+	if (rc) {
+		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+		return -1;
+	}
+	bp->flags |= USING_MSI_FLAG;
+
+	return 0;
+}
+
 static int bnx2x_req_irq(struct bnx2x *bp)
 {
+	unsigned long flags;
 	int rc;
 
-	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
+	if (bp->flags & USING_MSI_FLAG)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 			 bp->dev->name, bp->dev);
 	if (!rc)
 		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
@@ -6365,28 +6406,23 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
 }
 
 static int bnx2x_poll(struct napi_struct *napi, int budget);
-static void bnx2x_set_rx_mode(struct net_device *dev);
 
-/* must be called with rtnl_lock */
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+static void bnx2x_set_int_mode(struct bnx2x *bp)
 {
-	u32 load_code;
-	int i, rc = 0;
 	int num_queues;
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return -EPERM;
-#endif
-
-	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
-	if (use_inta) {
+	switch (int_mode) {
+	case INT_MODE_INTx:
+	case INT_MODE_MSI:
 		num_queues = 1;
 		bp->num_rx_queues = num_queues;
 		bp->num_tx_queues = num_queues;
 		DP(NETIF_MSG_IFUP,
 		   "set number of queues to %d\n", num_queues);
-	} else {
+		break;
+
+	case INT_MODE_MSIX:
+	default:
 		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
 			num_queues = min_t(u32, num_online_cpus(),
 					   BNX2X_MAX_QUEUES(bp));
@@ -6401,8 +6437,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
-		rc = bnx2x_enable_msix(bp);
-		if (rc) {
+		if (bnx2x_enable_msix(bp)) {
 			/* failed to enable MSI-X */
 			num_queues = 1;
 			bp->num_rx_queues = num_queues;
@@ -6412,8 +6447,27 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 				   "enable MSI-X set number of "
 				   "queues to %d\n", num_queues);
 		}
+		break;
 	}
 	bp->dev->real_num_tx_queues = bp->num_tx_queues;
+}
+
+static void bnx2x_set_rx_mode(struct net_device *dev);
+
+/* must be called with rtnl_lock */
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+	u32 load_code;
+	int i, rc = 0;
+#ifdef BNX2X_STOP_ON_ERROR
+	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
+	if (unlikely(bp->panic))
+		return -EPERM;
+#endif
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+	bnx2x_set_int_mode(bp);
 
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
@@ -6445,14 +6499,22 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 			pci_disable_msix(bp->pdev);
 			goto load_error1;
 		}
-		printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
 	} else {
+		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
+			bnx2x_enable_msi(bp);
 		bnx2x_ack_int(bp);
 		rc = bnx2x_req_irq(bp);
 		if (rc) {
 			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+			if (bp->flags & USING_MSI_FLAG)
+				pci_disable_msi(bp->pdev);
 			goto load_error1;
 		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
+			       bp->dev->name, bp->pdev->irq);
+		}
 	}
 
 	/* Send LOAD_REQUEST command to MCP
@@ -6689,8 +6751,6 @@ static void bnx2x_reset_func(struct bnx2x *bp)
 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 
-	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
-
 	/* Clear ILT */
 	base = FUNC_ILT_BASE(func);
 	for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7636,9 +7696,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 			       "MCP disabled, must load devices in order!\n");
 
 	/* Set multi queue mode */
-	if ((multi_mode != ETH_RSS_MODE_DISABLED) && (!use_inta)) {
+	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
+	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
 		printk(KERN_ERR PFX
-		       "Multi disabled since INTA is requested\n");
+		      "Multi disabled since int_mode requested is not MSI-X\n");
 		multi_mode = ETH_RSS_MODE_DISABLED;
 	}
 	bp->multi_mode = multi_mode;