aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/qlge
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/qlge
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--drivers/net/qlge/qlge.h20
-rw-r--r--drivers/net/qlge/qlge_dbg.c21
-rw-r--r--drivers/net/qlge/qlge_ethtool.c93
-rw-r--r--drivers/net/qlge/qlge_main.c164
-rw-r--r--drivers/net/qlge/qlge_mpi.c16
5 files changed, 167 insertions, 147 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786840a6..ca306fd5f588 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.25.00.00-01" 19#define DRV_VERSION "v1.00.00.29.00.00-01"
20 20
21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
22 22
@@ -1996,6 +1996,7 @@ enum {
1996 QL_LB_LINK_UP = 10, 1996 QL_LB_LINK_UP = 10,
1997 QL_FRC_COREDUMP = 11, 1997 QL_FRC_COREDUMP = 11,
1998 QL_EEH_FATAL = 12, 1998 QL_EEH_FATAL = 12,
1999	QL_ASIC_RECOVERY = 14,	/* We are in asic recovery. */
1999}; 2000};
2000 2001
2001/* link_status bit definitions */ 2002/* link_status bit definitions */
@@ -2083,6 +2084,7 @@ struct ql_adapter {
2083 u32 mailbox_in; 2084 u32 mailbox_in;
2084 u32 mailbox_out; 2085 u32 mailbox_out;
2085 struct mbox_params idc_mbc; 2086 struct mbox_params idc_mbc;
2087 struct mutex mpi_mutex;
2086 2088
2087 int tx_ring_size; 2089 int tx_ring_size;
2088 int rx_ring_size; 2090 int rx_ring_size;
@@ -2133,7 +2135,7 @@ struct ql_adapter {
2133 struct delayed_work mpi_idc_work; 2135 struct delayed_work mpi_idc_work;
2134 struct delayed_work mpi_core_to_log; 2136 struct delayed_work mpi_core_to_log;
2135 struct completion ide_completion; 2137 struct completion ide_completion;
2136 struct nic_operations *nic_ops; 2138 const struct nic_operations *nic_ops;
2137 u16 device_id; 2139 u16 device_id;
2138 struct timer_list timer; 2140 struct timer_list timer;
2139 atomic_t lb_count; 2141 atomic_t lb_count;
@@ -2221,12 +2223,12 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2221int ql_unpause_mpi_risc(struct ql_adapter *qdev); 2223int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2222int ql_pause_mpi_risc(struct ql_adapter *qdev); 2224int ql_pause_mpi_risc(struct ql_adapter *qdev);
2223int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2225int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2226int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2224int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 2227int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2225 u32 ram_addr, int word_count); 2228 u32 ram_addr, int word_count);
2226int ql_core_dump(struct ql_adapter *qdev, 2229int ql_core_dump(struct ql_adapter *qdev,
2227 struct ql_mpi_coredump *mpi_coredump); 2230 struct ql_mpi_coredump *mpi_coredump);
2228int ql_mb_about_fw(struct ql_adapter *qdev); 2231int ql_mb_about_fw(struct ql_adapter *qdev);
2229int ql_wol(struct ql_adapter *qdev);
2230int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2232int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
2231int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); 2233int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
2232int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); 2234int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2237,22 +2239,20 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
2237int ql_mb_get_port_cfg(struct ql_adapter *qdev); 2239int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2238int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2240int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2239int ql_wait_fifo_empty(struct ql_adapter *qdev); 2241int ql_wait_fifo_empty(struct ql_adapter *qdev);
2242void ql_get_dump(struct ql_adapter *qdev, void *buff);
2240void ql_gen_reg_dump(struct ql_adapter *qdev, 2243void ql_gen_reg_dump(struct ql_adapter *qdev,
2241 struct ql_reg_dump *mpi_coredump); 2244 struct ql_reg_dump *mpi_coredump);
2242netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2245netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
2243void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2246void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2244int ql_own_firmware(struct ql_adapter *qdev); 2247int ql_own_firmware(struct ql_adapter *qdev);
2245int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2248int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
2246void qlge_set_multicast_list(struct net_device *ndev);
2247 2249
2248#if 1 2250/* #define QL_ALL_DUMP */
2249#define QL_ALL_DUMP 2251/* #define QL_REG_DUMP */
2250#define QL_REG_DUMP 2252/* #define QL_DEV_DUMP */
2251#define QL_DEV_DUMP 2253/* #define QL_CB_DUMP */
2252#define QL_CB_DUMP
2253/* #define QL_IB_DUMP */ 2254/* #define QL_IB_DUMP */
2254/* #define QL_OB_DUMP */ 2255/* #define QL_OB_DUMP */
2255#endif
2256 2256
2257#ifdef QL_REG_DUMP 2257#ifdef QL_REG_DUMP
2258extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); 2258extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1318 if (status) 1318 if (status)
1319 return; 1319 return;
1320}
1321
1322void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323{
1324 /*
1325 * If the dump has already been taken and is stored
1326 * in our internal buffer and if force dump is set then
1327 * just start the spool to dump it to the log file
1328	 * and also, take a snapshot of the general regs
1329	 * to the user's buffer or else take complete dump
1330 * to the user's buffer if force is not set.
1331 */
1320 1332
1321 if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) 1333 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 if (!ql_core_dump(qdev, buff))
1335 ql_soft_reset_mpi_risc(qdev);
1336 else
1337 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 } else {
1339 ql_gen_reg_dump(qdev, buff);
1322 ql_get_core_dump(qdev); 1340 ql_get_core_dump(qdev);
1341 }
1323} 1342}
1324 1343
1325/* Coredump to messages log file using separate worker thread */ 1344/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..19b00fa0eaf0 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -356,7 +356,7 @@ static int ql_get_settings(struct net_device *ndev,
356 ecmd->port = PORT_FIBRE; 356 ecmd->port = PORT_FIBRE;
357 } 357 }
358 358
359 ecmd->speed = SPEED_10000; 359 ethtool_cmd_speed_set(ecmd, SPEED_10000);
360 ecmd->duplex = DUPLEX_FULL; 360 ecmd->duplex = DUPLEX_FULL;
361 361
362 return 0; 362 return 0;
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
376 drvinfo->n_stats = 0; 376 drvinfo->n_stats = 0;
377 drvinfo->testinfo_len = 0; 377 drvinfo->testinfo_len = 0;
378 drvinfo->regdump_len = 0; 378 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
379 drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
380 else
381 drvinfo->regdump_len = sizeof(struct ql_reg_dump);
379 drvinfo->eedump_len = 0; 382 drvinfo->eedump_len = 0;
380} 383}
381 384
@@ -409,31 +412,31 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
409 return 0; 412 return 0;
410} 413}
411 414
412static int ql_phys_id(struct net_device *ndev, u32 data) 415static int ql_set_phys_id(struct net_device *ndev,
416 enum ethtool_phys_id_state state)
417
413{ 418{
414 struct ql_adapter *qdev = netdev_priv(ndev); 419 struct ql_adapter *qdev = netdev_priv(ndev);
415 u32 led_reg, i;
416 int status;
417 420
418 /* Save the current LED settings */ 421 switch (state) {
419 status = ql_mb_get_led_cfg(qdev); 422 case ETHTOOL_ID_ACTIVE:
420 if (status) 423 /* Save the current LED settings */
421 return status; 424 if (ql_mb_get_led_cfg(qdev))
422 led_reg = qdev->led_config; 425 return -EIO;
423 426
424 /* Start blinking the led */ 427 /* Start blinking */
425 if (!data || data > 300)
426 data = 300;
427
428 for (i = 0; i < (data * 10); i++)
429 ql_mb_set_led_cfg(qdev, QL_LED_BLINK); 428 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
429 return 0;
430 430
431 /* Restore LED settings */ 431 case ETHTOOL_ID_INACTIVE:
432 status = ql_mb_set_led_cfg(qdev, led_reg); 432 /* Restore LED settings */
433 if (status) 433 if (ql_mb_set_led_cfg(qdev, qdev->led_config))
434 return status; 434 return -EIO;
435 return 0;
435 436
436 return 0; 437 default:
438 return -EINVAL;
439 }
437} 440}
438 441
439static int ql_start_loopback(struct ql_adapter *qdev) 442static int ql_start_loopback(struct ql_adapter *qdev)
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
547 550
548static int ql_get_regs_len(struct net_device *ndev) 551static int ql_get_regs_len(struct net_device *ndev)
549{ 552{
550 return sizeof(struct ql_reg_dump); 553 struct ql_adapter *qdev = netdev_priv(ndev);
554
555 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
556 return sizeof(struct ql_mpi_coredump);
557 else
558 return sizeof(struct ql_reg_dump);
551} 559}
552 560
553static void ql_get_regs(struct net_device *ndev, 561static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
555{ 563{
556 struct ql_adapter *qdev = netdev_priv(ndev); 564 struct ql_adapter *qdev = netdev_priv(ndev);
557 565
558 ql_gen_reg_dump(qdev, p); 566 ql_get_dump(qdev, p);
567 qdev->core_is_dumped = 0;
568 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
569 regs->len = sizeof(struct ql_mpi_coredump);
570 else
571 regs->len = sizeof(struct ql_reg_dump);
559} 572}
560 573
561static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 574static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -642,32 +655,6 @@ static int ql_set_pauseparam(struct net_device *netdev,
642 return status; 655 return status;
643} 656}
644 657
645static u32 ql_get_rx_csum(struct net_device *netdev)
646{
647 struct ql_adapter *qdev = netdev_priv(netdev);
648 return qdev->rx_csum;
649}
650
651static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
652{
653 struct ql_adapter *qdev = netdev_priv(netdev);
654 qdev->rx_csum = data;
655 return 0;
656}
657
658static int ql_set_tso(struct net_device *ndev, uint32_t data)
659{
660
661 if (data) {
662 ndev->features |= NETIF_F_TSO;
663 ndev->features |= NETIF_F_TSO6;
664 } else {
665 ndev->features &= ~NETIF_F_TSO;
666 ndev->features &= ~NETIF_F_TSO6;
667 }
668 return 0;
669}
670
671static u32 ql_get_msglevel(struct net_device *ndev) 658static u32 ql_get_msglevel(struct net_device *ndev)
672{ 659{
673 struct ql_adapter *qdev = netdev_priv(ndev); 660 struct ql_adapter *qdev = netdev_priv(ndev);
@@ -690,18 +677,10 @@ const struct ethtool_ops qlge_ethtool_ops = {
690 .get_msglevel = ql_get_msglevel, 677 .get_msglevel = ql_get_msglevel,
691 .set_msglevel = ql_set_msglevel, 678 .set_msglevel = ql_set_msglevel,
692 .get_link = ethtool_op_get_link, 679 .get_link = ethtool_op_get_link,
693 .phys_id = ql_phys_id, 680 .set_phys_id = ql_set_phys_id,
694 .self_test = ql_self_test, 681 .self_test = ql_self_test,
695 .get_pauseparam = ql_get_pauseparam, 682 .get_pauseparam = ql_get_pauseparam,
696 .set_pauseparam = ql_set_pauseparam, 683 .set_pauseparam = ql_set_pauseparam,
697 .get_rx_csum = ql_get_rx_csum,
698 .set_rx_csum = ql_set_rx_csum,
699 .get_tx_csum = ethtool_op_get_tx_csum,
700 .set_tx_csum = ethtool_op_set_tx_csum,
701 .get_sg = ethtool_op_get_sg,
702 .set_sg = ethtool_op_set_sg,
703 .get_tso = ethtool_op_get_tso,
704 .set_tso = ql_set_tso,
705 .get_coalesce = ql_get_coalesce, 684 .get_coalesce = ql_get_coalesce,
706 .set_coalesce = ql_set_coalesce, 685 .set_coalesce = ql_set_coalesce,
707 .get_sset_count = ql_get_sset_count, 686 .get_sset_count = ql_get_sset_count,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 5f89e83501f4..6b4ff970972b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <linux/prefetch.h>
41#include <net/ip6_checksum.h> 42#include <net/ip6_checksum.h>
42 43
43#include "qlge.h" 44#include "qlge.h"
@@ -62,15 +63,15 @@ static const u32 default_msg =
62/* NETIF_MSG_PKTDATA | */ 63/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0; 64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64 65
65static int debug = 0x00007fff; /* defaults above */ 66static int debug = -1; /* defaults above */
66module_param(debug, int, 0); 67module_param(debug, int, 0664);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68 69
69#define MSIX_IRQ 0 70#define MSIX_IRQ 0
70#define MSI_IRQ 1 71#define MSI_IRQ 1
71#define LEG_IRQ 2 72#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ; 73static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 74module_param(qlge_irq_type, int, 0664);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 76
76static int qlge_mpi_coredump; 77static int qlge_mpi_coredump;
@@ -94,6 +95,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
94 95
95MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); 96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96 97
98static int ql_wol(struct ql_adapter *qdev);
99static void qlge_set_multicast_list(struct net_device *ndev);
100
97/* This hardware semaphore causes exclusive access to 101/* This hardware semaphore causes exclusive access to
98 * resources shared between the NIC driver, MPI firmware, 102 * resources shared between the NIC driver, MPI firmware,
99 * FCOE firmware and the FC driver. 103 * FCOE firmware and the FC driver.
@@ -657,7 +661,7 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
657/* If we're running with multiple MSI-X vectors then we enable on the fly. 661/* If we're running with multiple MSI-X vectors then we enable on the fly.
658 * Otherwise, we may have multiple outstanding workers and don't want to 662 * Otherwise, we may have multiple outstanding workers and don't want to
659 * enable until the last one finishes. In this case, the irq_cnt gets 663 * enable until the last one finishes. In this case, the irq_cnt gets
660 * incremented everytime we queue a worker and decremented everytime 664 * incremented every time we queue a worker and decremented every time
661 * a worker finishes. Once it hits zero we enable the interrupt. 665 * a worker finishes. Once it hits zero we enable the interrupt.
662 */ 666 */
663u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 667u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
@@ -1566,9 +1570,9 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1566 rx_ring->rx_packets++; 1570 rx_ring->rx_packets++;
1567 rx_ring->rx_bytes += skb->len; 1571 rx_ring->rx_bytes += skb->len;
1568 skb->protocol = eth_type_trans(skb, ndev); 1572 skb->protocol = eth_type_trans(skb, ndev);
1569 skb->ip_summed = CHECKSUM_NONE; 1573 skb_checksum_none_assert(skb);
1570 1574
1571 if (qdev->rx_csum && 1575 if ((ndev->features & NETIF_F_RXCSUM) &&
1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1576 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1573 /* TCP frame. */ 1577 /* TCP frame. */
1574 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -1676,12 +1680,12 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1676 rx_ring->rx_packets++; 1680 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len; 1681 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev); 1682 skb->protocol = eth_type_trans(skb, ndev);
1679 skb->ip_summed = CHECKSUM_NONE; 1683 skb_checksum_none_assert(skb);
1680 1684
1681 /* If rx checksum is on, and there are no 1685 /* If rx checksum is on, and there are no
1682 * csum or frame errors. 1686 * csum or frame errors.
1683 */ 1687 */
1684 if (qdev->rx_csum && 1688 if ((ndev->features & NETIF_F_RXCSUM) &&
1685 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1689 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1686 /* TCP frame. */ 1690 /* TCP frame. */
1687 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1691 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -1996,12 +2000,12 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1996 } 2000 }
1997 2001
1998 skb->protocol = eth_type_trans(skb, ndev); 2002 skb->protocol = eth_type_trans(skb, ndev);
1999 skb->ip_summed = CHECKSUM_NONE; 2003 skb_checksum_none_assert(skb);
2000 2004
2001 /* If rx checksum is on, and there are no 2005 /* If rx checksum is on, and there are no
2002 * csum or frame errors. 2006 * csum or frame errors.
2003 */ 2007 */
2004 if (qdev->rx_csum && 2008 if ((ndev->features & NETIF_F_RXCSUM) &&
2005 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 2009 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2006 /* TCP frame. */ 2010 /* TCP frame. */
2007 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 2011 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2148,6 +2152,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
2148 * thread 2152 * thread
2149 */ 2153 */
2150 clear_bit(QL_ADAPTER_UP, &qdev->flags); 2154 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2155 /* Set asic recovery bit to indicate reset process that we are
2156 * in fatal error recovery process rather than normal close
2157 */
2158 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2151 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); 2159 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2152} 2160}
2153 2161
@@ -2162,23 +2170,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2162 return; 2170 return;
2163 2171
2164 case CAM_LOOKUP_ERR_EVENT: 2172 case CAM_LOOKUP_ERR_EVENT:
2165 netif_err(qdev, link, qdev->ndev, 2173 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2166 "Multiple CAM hits lookup occurred.\n"); 2174 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2167 netif_err(qdev, drv, qdev->ndev,
2168 "This event shouldn't occur.\n");
2169 ql_queue_asic_error(qdev); 2175 ql_queue_asic_error(qdev);
2170 return; 2176 return;
2171 2177
2172 case SOFT_ECC_ERROR_EVENT: 2178 case SOFT_ECC_ERROR_EVENT:
2173 netif_err(qdev, rx_err, qdev->ndev, 2179 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2174 "Soft ECC error detected.\n");
2175 ql_queue_asic_error(qdev); 2180 ql_queue_asic_error(qdev);
2176 break; 2181 break;
2177 2182
2178 case PCI_ERR_ANON_BUF_RD: 2183 case PCI_ERR_ANON_BUF_RD:
2179 netif_err(qdev, rx_err, qdev->ndev, 2184 netdev_err(qdev->ndev, "PCI error occurred when reading "
2180 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", 2185 "anonymous buffers from rx_ring %d.\n",
2181 ib_ae_rsp->q_id); 2186 ib_ae_rsp->q_id);
2182 ql_queue_asic_error(qdev); 2187 ql_queue_asic_error(qdev);
2183 break; 2188 break;
2184 2189
@@ -2222,10 +2227,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2222 ql_update_cq(rx_ring); 2227 ql_update_cq(rx_ring);
2223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2228 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 } 2229 }
2230 if (!net_rsp)
2231 return 0;
2225 ql_write_cq_idx(rx_ring); 2232 ql_write_cq_idx(rx_ring);
2226 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2233 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2227 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) && 2234 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2228 net_rsp != NULL) {
2229 if (atomic_read(&tx_ring->queue_stopped) && 2235 if (atomic_read(&tx_ring->queue_stopped) &&
2230 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) 2236 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2231 /* 2237 /*
@@ -2381,6 +2387,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2381 2387
2382} 2388}
2383 2389
2390static void qlge_restore_vlan(struct ql_adapter *qdev)
2391{
2392 qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2393
2394 if (qdev->vlgrp) {
2395 u16 vid;
2396 for (vid = 0; vid < VLAN_N_VID; vid++) {
2397 if (!vlan_group_get_device(qdev->vlgrp, vid))
2398 continue;
2399 qlge_vlan_rx_add_vid(qdev->ndev, vid);
2400 }
2401 }
2402}
2403
2384/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ 2404/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2385static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) 2405static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2386{ 2406{
@@ -2418,11 +2438,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2418 */ 2438 */
2419 if (var & STS_FE) { 2439 if (var & STS_FE) {
2420 ql_queue_asic_error(qdev); 2440 ql_queue_asic_error(qdev);
2421 netif_err(qdev, intr, qdev->ndev, 2441 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2422 "Got fatal error, STS = %x.\n", var);
2423 var = ql_read32(qdev, ERR_STS); 2442 var = ql_read32(qdev, ERR_STS);
2424 netif_err(qdev, intr, qdev->ndev, 2443 netdev_err(qdev->ndev, "Resetting chip. "
2425 "Resetting chip. Error Status Register = 0x%x\n", var); 2444 "Error Status Register = 0x%x\n", var);
2426 return IRQ_HANDLED; 2445 return IRQ_HANDLED;
2427 } 2446 }
2428 2447
@@ -2571,7 +2590,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2571 2590
2572 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); 2591 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2573 2592
2574 if (qdev->vlgrp && vlan_tx_tag_present(skb)) { 2593 if (vlan_tx_tag_present(skb)) {
2575 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, 2594 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2576 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); 2595 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2577 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2596 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
@@ -3281,7 +3300,7 @@ msi:
3281 * will service it. An example would be if there are 3300 * will service it. An example would be if there are
3282 * 2 vectors (so 2 RSS rings) and 8 TX completion rings. 3301 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3283 * This would mean that vector 0 would service RSS ring 0 3302 * This would mean that vector 0 would service RSS ring 0
3284 * and TX competion rings 0,1,2 and 3. Vector 1 would 3303 * and TX completion rings 0,1,2 and 3. Vector 1 would
3285 * service RSS ring 1 and TX completion rings 4,5,6 and 7. 3304 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3286 */ 3305 */
3287static void ql_set_tx_vect(struct ql_adapter *qdev) 3306static void ql_set_tx_vect(struct ql_adapter *qdev)
@@ -3530,12 +3549,13 @@ err_irq:
3530 3549
3531static int ql_start_rss(struct ql_adapter *qdev) 3550static int ql_start_rss(struct ql_adapter *qdev)
3532{ 3551{
3533 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 3552 static const u8 init_hash_seed[] = {
3534 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 3553 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3535 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 3554 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3536 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 3555 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3537 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 3556 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3538 0xbe, 0xac, 0x01, 0xfa}; 3557 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3558 };
3539 struct ricb *ricb = &qdev->ricb; 3559 struct ricb *ricb = &qdev->ricb;
3540 int status = 0; 3560 int status = 0;
3541 int i; 3561 int i;
@@ -3798,11 +3818,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3798 end_jiffies = jiffies + 3818 end_jiffies = jiffies +
3799 max((unsigned long)1, usecs_to_jiffies(30)); 3819 max((unsigned long)1, usecs_to_jiffies(30));
3800 3820
3801 /* Stop management traffic. */ 3821 /* Check if bit is set then skip the mailbox command and
3802 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); 3822 * clear the bit, else we are in normal reset process.
3823 */
3824 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3825 /* Stop management traffic. */
3826 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3803 3827
3804 /* Wait for the NIC and MGMNT FIFOs to empty. */ 3828 /* Wait for the NIC and MGMNT FIFOs to empty. */
3805 ql_wait_fifo_empty(qdev); 3829 ql_wait_fifo_empty(qdev);
3830 } else
3831 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3806 3832
3807 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3833 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3808 3834
@@ -3826,7 +3852,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3826 3852
3827static void ql_display_dev_info(struct net_device *ndev) 3853static void ql_display_dev_info(struct net_device *ndev)
3828{ 3854{
3829 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3855 struct ql_adapter *qdev = netdev_priv(ndev);
3830 3856
3831 netif_info(qdev, probe, qdev->ndev, 3857 netif_info(qdev, probe, qdev->ndev,
3832 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " 3858 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -3841,7 +3867,7 @@ static void ql_display_dev_info(struct net_device *ndev)
3841 "MAC address %pM\n", ndev->dev_addr); 3867 "MAC address %pM\n", ndev->dev_addr);
3842} 3868}
3843 3869
3844int ql_wol(struct ql_adapter *qdev) 3870static int ql_wol(struct ql_adapter *qdev)
3845{ 3871{
3846 int status = 0; 3872 int status = 0;
3847 u32 wol = MB_WOL_DISABLE; 3873 u32 wol = MB_WOL_DISABLE;
@@ -3888,11 +3914,8 @@ int ql_wol(struct ql_adapter *qdev)
3888 return status; 3914 return status;
3889} 3915}
3890 3916
3891static int ql_adapter_down(struct ql_adapter *qdev) 3917static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3892{ 3918{
3893 int i, status = 0;
3894
3895 ql_link_off(qdev);
3896 3919
3897 /* Don't kill the reset worker thread if we 3920 /* Don't kill the reset worker thread if we
3898 * are in the process of recovery. 3921 * are in the process of recovery.
@@ -3904,6 +3927,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3904 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3927 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3905 cancel_delayed_work_sync(&qdev->mpi_core_to_log); 3928 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3906 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3929 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3930}
3931
3932static int ql_adapter_down(struct ql_adapter *qdev)
3933{
3934 int i, status = 0;
3935
3936 ql_link_off(qdev);
3937
3938 ql_cancel_all_work_sync(qdev);
3907 3939
3908 for (i = 0; i < qdev->rss_ring_count; i++) 3940 for (i = 0; i < qdev->rss_ring_count; i++)
3909 napi_disable(&qdev->rx_ring[i].napi); 3941 napi_disable(&qdev->rx_ring[i].napi);
@@ -3950,6 +3982,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3950 clear_bit(QL_PROMISCUOUS, &qdev->flags); 3982 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3951 qlge_set_multicast_list(qdev->ndev); 3983 qlge_set_multicast_list(qdev->ndev);
3952 3984
3985 /* Restore vlan setting. */
3986 qlge_restore_vlan(qdev);
3987
3953 ql_enable_interrupts(qdev); 3988 ql_enable_interrupts(qdev);
3954 ql_enable_all_completion_interrupts(qdev); 3989 ql_enable_all_completion_interrupts(qdev);
3955 netif_tx_start_all_queues(qdev->ndev); 3990 netif_tx_start_all_queues(qdev->ndev);
@@ -4124,7 +4159,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
4124 int i, status; 4159 int i, status;
4125 u32 lbq_buf_len; 4160 u32 lbq_buf_len;
4126 4161
4127 /* Wait for an oustanding reset to complete. */ 4162 /* Wait for an outstanding reset to complete. */
4128 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4163 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4129 int i = 3; 4164 int i = 3;
4130 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4165 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4235,9 +4270,9 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4235 return &ndev->stats; 4270 return &ndev->stats;
4236} 4271}
4237 4272
4238void qlge_set_multicast_list(struct net_device *ndev) 4273static void qlge_set_multicast_list(struct net_device *ndev)
4239{ 4274{
4240 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4275 struct ql_adapter *qdev = netdev_priv(ndev);
4241 struct netdev_hw_addr *ha; 4276 struct netdev_hw_addr *ha;
4242 int i, status; 4277 int i, status;
4243 4278
@@ -4253,7 +4288,7 @@ void qlge_set_multicast_list(struct net_device *ndev)
4253 if (ql_set_routing_reg 4288 if (ql_set_routing_reg
4254 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { 4289 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4255 netif_err(qdev, hw, qdev->ndev, 4290 netif_err(qdev, hw, qdev->ndev,
4256 "Failed to set promiscous mode.\n"); 4291 "Failed to set promiscuous mode.\n");
4257 } else { 4292 } else {
4258 set_bit(QL_PROMISCUOUS, &qdev->flags); 4293 set_bit(QL_PROMISCUOUS, &qdev->flags);
4259 } 4294 }
@@ -4263,7 +4298,7 @@ void qlge_set_multicast_list(struct net_device *ndev)
4263 if (ql_set_routing_reg 4298 if (ql_set_routing_reg
4264 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { 4299 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4265 netif_err(qdev, hw, qdev->ndev, 4300 netif_err(qdev, hw, qdev->ndev,
4266 "Failed to clear promiscous mode.\n"); 4301 "Failed to clear promiscuous mode.\n");
4267 } else { 4302 } else {
4268 clear_bit(QL_PROMISCUOUS, &qdev->flags); 4303 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4269 } 4304 }
@@ -4327,7 +4362,7 @@ exit:
4327 4362
4328static int qlge_set_mac_address(struct net_device *ndev, void *p) 4363static int qlge_set_mac_address(struct net_device *ndev, void *p)
4329{ 4364{
4330 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4365 struct ql_adapter *qdev = netdev_priv(ndev);
4331 struct sockaddr *addr = p; 4366 struct sockaddr *addr = p;
4332 int status; 4367 int status;
4333 4368
@@ -4350,7 +4385,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
4350 4385
4351static void qlge_tx_timeout(struct net_device *ndev) 4386static void qlge_tx_timeout(struct net_device *ndev)
4352{ 4387{
4353 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4388 struct ql_adapter *qdev = netdev_priv(ndev);
4354 ql_queue_asic_error(qdev); 4389 ql_queue_asic_error(qdev);
4355} 4390}
4356 4391
@@ -4384,12 +4419,12 @@ error:
4384 rtnl_unlock(); 4419 rtnl_unlock();
4385} 4420}
4386 4421
4387static struct nic_operations qla8012_nic_ops = { 4422static const struct nic_operations qla8012_nic_ops = {
4388 .get_flash = ql_get_8012_flash_params, 4423 .get_flash = ql_get_8012_flash_params,
4389 .port_initialize = ql_8012_port_initialize, 4424 .port_initialize = ql_8012_port_initialize,
4390}; 4425};
4391 4426
4392static struct nic_operations qla8000_nic_ops = { 4427static const struct nic_operations qla8000_nic_ops = {
4393 .get_flash = ql_get_8000_flash_params, 4428 .get_flash = ql_get_8000_flash_params,
4394 .port_initialize = ql_8000_port_initialize, 4429 .port_initialize = ql_8000_port_initialize,
4395}; 4430};
@@ -4593,7 +4628,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4593 /* 4628 /*
4594 * Set up the operating parameters. 4629 * Set up the operating parameters.
4595 */ 4630 */
4596 qdev->rx_csum = 1;
4597 qdev->workqueue = create_singlethread_workqueue(ndev->name); 4631 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4598 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); 4632 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4599 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); 4633 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
@@ -4602,6 +4636,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4602 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4636 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4603 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); 4637 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4604 init_completion(&qdev->ide_completion); 4638 init_completion(&qdev->ide_completion);
4639 mutex_init(&qdev->mpi_mutex);
4605 4640
4606 if (!cards_found) { 4641 if (!cards_found) {
4607 dev_info(&pdev->dev, "%s\n", DRV_STRING); 4642 dev_info(&pdev->dev, "%s\n", DRV_STRING);
@@ -4666,15 +4701,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4666 4701
4667 qdev = netdev_priv(ndev); 4702 qdev = netdev_priv(ndev);
4668 SET_NETDEV_DEV(ndev, &pdev->dev); 4703 SET_NETDEV_DEV(ndev, &pdev->dev);
4669 ndev->features = (0 4704 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4670 | NETIF_F_IP_CSUM 4705 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4671 | NETIF_F_SG 4706 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4672 | NETIF_F_TSO 4707 ndev->features = ndev->hw_features |
4673 | NETIF_F_TSO6 4708 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4674 | NETIF_F_TSO_ECN
4675 | NETIF_F_HW_VLAN_TX
4676 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4677 ndev->features |= NETIF_F_GRO;
4678 4709
4679 if (test_bit(QL_DMA64, &qdev->flags)) 4710 if (test_bit(QL_DMA64, &qdev->flags))
4680 ndev->features |= NETIF_F_HIGHDMA; 4711 ndev->features |= NETIF_F_HIGHDMA;
@@ -4726,6 +4757,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4726 struct net_device *ndev = pci_get_drvdata(pdev); 4757 struct net_device *ndev = pci_get_drvdata(pdev);
4727 struct ql_adapter *qdev = netdev_priv(ndev); 4758 struct ql_adapter *qdev = netdev_priv(ndev);
4728 del_timer_sync(&qdev->timer); 4759 del_timer_sync(&qdev->timer);
4760 ql_cancel_all_work_sync(qdev);
4729 unregister_netdev(ndev); 4761 unregister_netdev(ndev);
4730 ql_release_all(pdev); 4762 ql_release_all(pdev);
4731 pci_disable_device(pdev); 4763 pci_disable_device(pdev);
@@ -4745,13 +4777,7 @@ static void ql_eeh_close(struct net_device *ndev)
4745 4777
4746 /* Disabling the timer */ 4778 /* Disabling the timer */
4747 del_timer_sync(&qdev->timer); 4779 del_timer_sync(&qdev->timer);
4748 if (test_bit(QL_ADAPTER_UP, &qdev->flags)) 4780 ql_cancel_all_work_sync(qdev);
4749 cancel_delayed_work_sync(&qdev->asic_reset_work);
4750 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4751 cancel_delayed_work_sync(&qdev->mpi_work);
4752 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4753 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4754 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4755 4781
4756 for (i = 0; i < qdev->rss_ring_count; i++) 4782 for (i = 0; i < qdev->rss_ring_count; i++)
4757 netif_napi_del(&qdev->rx_ring[i].napi); 4783 netif_napi_del(&qdev->rx_ring[i].napi);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e8570c7cb..ff2bf8a4e247 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
534 int status; 534 int status;
535 unsigned long count; 535 unsigned long count;
536 536
537 mutex_lock(&qdev->mpi_mutex);
537 538
538 /* Begin polled mode for MPI */ 539 /* Begin polled mode for MPI */
539 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 540 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -603,6 +604,7 @@ done:
603end: 604end:
604 /* End polled mode for MPI */ 605 /* End polled mode for MPI */
605 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 606 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
607 mutex_unlock(&qdev->mpi_mutex);
606 return status; 608 return status;
607} 609}
608 610
@@ -681,7 +683,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
681/* Send and ACK mailbox command to the firmware to 683/* Send and ACK mailbox command to the firmware to
682 * let it continue with the change. 684 * let it continue with the change.
683 */ 685 */
684int ql_mb_idc_ack(struct ql_adapter *qdev) 686static int ql_mb_idc_ack(struct ql_adapter *qdev)
685{ 687{
686 struct mbox_params mbc; 688 struct mbox_params mbc;
687 struct mbox_params *mbcp = &mbc; 689 struct mbox_params *mbcp = &mbc;
@@ -744,7 +746,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
744 return status; 746 return status;
745} 747}
746 748
747int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, 749static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
748 u32 size) 750 u32 size)
749{ 751{
750 int status = 0; 752 int status = 0;
@@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
1099static int ql_set_port_cfg(struct ql_adapter *qdev) 1101static int ql_set_port_cfg(struct ql_adapter *qdev)
1100{ 1102{
1101 int status; 1103 int status;
1102 rtnl_lock();
1103 status = ql_mb_set_port_cfg(qdev); 1104 status = ql_mb_set_port_cfg(qdev);
1104 rtnl_unlock();
1105 if (status) 1105 if (status)
1106 return status; 1106 return status;
1107 status = ql_idc_wait(qdev); 1107 status = ql_idc_wait(qdev);
@@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
1123 int status; 1123 int status;
1124 1124
1125 rtnl_lock();
1126 status = ql_mb_get_port_cfg(qdev); 1125 status = ql_mb_get_port_cfg(qdev);
1127 rtnl_unlock();
1128 if (status) { 1126 if (status) {
1129 netif_err(qdev, drv, qdev->ndev, 1127 netif_err(qdev, drv, qdev->ndev,
1130 "Bug: Failed to get port config data.\n"); 1128 "Bug: Failed to get port config data.\n");
@@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work)
1167 u32 aen; 1165 u32 aen;
1168 int timeout; 1166 int timeout;
1169 1167
1170 rtnl_lock();
1171 aen = mbcp->mbox_out[1] >> 16; 1168 aen = mbcp->mbox_out[1] >> 16;
1172 timeout = (mbcp->mbox_out[1] >> 8) & 0xf; 1169 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
1173 1170
@@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work)
1231 } 1228 }
1232 break; 1229 break;
1233 } 1230 }
1234 rtnl_unlock();
1235} 1231}
1236 1232
1237void ql_mpi_work(struct work_struct *work) 1233void ql_mpi_work(struct work_struct *work)
@@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work)
1242 struct mbox_params *mbcp = &mbc; 1238 struct mbox_params *mbcp = &mbc;
1243 int err = 0; 1239 int err = 0;
1244 1240
1245 rtnl_lock(); 1241 mutex_lock(&qdev->mpi_mutex);
1246 /* Begin polled mode for MPI */ 1242 /* Begin polled mode for MPI */
1247 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 1243 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
1248 1244
@@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work)
1259 1255
1260 /* End polled mode for MPI */ 1256 /* End polled mode for MPI */
1261 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 1257 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
1262 rtnl_unlock(); 1258 mutex_unlock(&qdev->mpi_mutex);
1263 ql_enable_completion_interrupt(qdev, 0); 1259 ql_enable_completion_interrupt(qdev, 0);
1264} 1260}
1265 1261