Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
 drivers/net/qlge/qlge_main.c | 164 ++++++++++++++++++++++-----------------
 1 file changed, 95 insertions(+), 69 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 5f89e83501f4..6b4ff970972b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 
 #include "qlge.h"
@@ -62,15 +63,15 @@ static const u32 default_msg =
 /* NETIF_MSG_PKTDATA | */
 	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
 
-static int debug = 0x00007fff;	/* defaults above */
-module_param(debug, int, 0);
+static int debug = -1;	/* defaults above */
+module_param(debug, int, 0664);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 #define MSIX_IRQ 0
 #define MSI_IRQ 1
 #define LEG_IRQ 2
 static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static int qlge_mpi_coredump;
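
The module_param() fix above is easy to misread, so here is a minimal, self-contained sketch of the API with illustrative names (not the driver's): the third argument is a sysfs permission mode, not a default value. Passing MSIX_IRQ (0) was a bug that merely hid the parameter from sysfs; 0664 exposes it under /sys/module/<name>/parameters/ as owner/group-writable.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter mirroring the pattern used above. */
static int demo_debug = -1;		/* -1 = use driver defaults */
module_param(demo_debug, int, 0664);	/* 0664 = sysfs mode, rw for owner/group */
MODULE_PARM_DESC(demo_debug, "Demo debug level (-1 = defaults)");
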
@@ -94,6 +95,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
  * FCOE firmware and the FC driver.
@@ -657,7 +661,7 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
 /* If we're running with multiple MSI-X vectors then we enable on the fly.
  * Otherwise, we may have multiple outstanding workers and don't want to
  * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented everytime we queue a worker and decremented everytime
+ * incremented every time we queue a worker and decremented every time
  * a worker finishes.  Once it hits zero we enable the interrupt.
  */
 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
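
A minimal sketch of the counting scheme this comment describes, for the single shared-vector case; the demo_* names are illustrative, not the driver's:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_irq_cnt = ATOMIC_INIT(0);

/* queueing a worker keeps the interrupt disabled */
static void demo_worker_queued(void)
{
	atomic_inc(&demo_irq_cnt);
}

/* returns true when the last outstanding worker has finished and the
 * interrupt may safely be re-enabled */
static bool demo_worker_done(void)
{
	return atomic_dec_and_test(&demo_irq_cnt);
}
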
@@ -1566,9 +1570,9 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
 	skb->protocol = eth_type_trans(skb, ndev);
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
-	if (qdev->rx_csum &&
+	if ((ndev->features & NETIF_F_RXCSUM) &&
 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
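
The same three-line pattern recurs in the next two hunks. A minimal sketch of what it implements, with demo_hw_csum_ok() standing in for the IB_MAC_* flag checks (illustrative name, not the driver's):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool demo_hw_csum_ok(struct sk_buff *skb)
{
	return true;	/* stands in for the flags1/flags2 hardware checks */
}

static void demo_rx_csum(struct net_device *ndev, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);	/* start from CHECKSUM_NONE */
	if ((ndev->features & NETIF_F_RXCSUM) && demo_hw_csum_ok(skb))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
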
@@ -1676,12 +1680,12 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
 	skb->protocol = eth_type_trans(skb, ndev);
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* If rx checksum is on, and there are no
 	 * csum or frame errors.
 	 */
-	if (qdev->rx_csum &&
+	if ((ndev->features & NETIF_F_RXCSUM) &&
 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -1996,12 +2000,12 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 	}
 
 	skb->protocol = eth_type_trans(skb, ndev);
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* If rx checksum is on, and there are no
 	 * csum or frame errors.
 	 */
-	if (qdev->rx_csum &&
+	if ((ndev->features & NETIF_F_RXCSUM) &&
 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 		/* TCP frame. */
 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2148,6 +2152,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 	 * thread
 	 */
 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	/* Set asic recovery bit to indicate reset process that we are
+	 * in fatal error recovery process rather than normal close
+	 */
+	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
@@ -2162,23 +2170,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
 		return;
 
 	case CAM_LOOKUP_ERR_EVENT:
-		netif_err(qdev, link, qdev->ndev,
-			  "Multiple CAM hits lookup occurred.\n");
-		netif_err(qdev, drv, qdev->ndev,
-			  "This event shouldn't occur.\n");
+		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
 		ql_queue_asic_error(qdev);
 		return;
 
 	case SOFT_ECC_ERROR_EVENT:
-		netif_err(qdev, rx_err, qdev->ndev,
-			  "Soft ECC error detected.\n");
+		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
 		ql_queue_asic_error(qdev);
 		break;
 
 	case PCI_ERR_ANON_BUF_RD:
-		netif_err(qdev, rx_err, qdev->ndev,
-			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
-			  ib_ae_rsp->q_id);
+		netdev_err(qdev->ndev, "PCI error occurred when reading "
+			"anonymous buffers from rx_ring %d.\n",
+			ib_ae_rsp->q_id);
 		ql_queue_asic_error(qdev);
 		break;
 
@@ -2222,10 +2227,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 		ql_update_cq(rx_ring);
 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	}
+	if (!net_rsp)
+		return 0;
 	ql_write_cq_idx(rx_ring);
 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
-	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
-					net_rsp != NULL) {
+	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
 		if (atomic_read(&tx_ring->queue_stopped) &&
 		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
 			/*
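
The hunk above is an ordering fix: the old code computed &qdev->tx_ring[net_rsp->txq_idx] before its net_rsp != NULL test, so an empty completion ring could dereference a NULL pointer. A minimal sketch of the corrected shape, with hypothetical names:

struct demo_rsp {
	int txq_idx;
};

static int demo_clean(struct demo_rsp *net_rsp)
{
	if (!net_rsp)			/* bail out before any net_rsp-> use */
		return 0;
	return net_rsp->txq_idx;	/* safe: net_rsp is known non-NULL */
}
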
@@ -2381,6 +2387,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+	qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+	if (qdev->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(qdev->vlgrp, vid))
+				continue;
+			qlge_vlan_rx_add_vid(qdev->ndev, vid);
+		}
+	}
+}
+
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -2418,11 +2438,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 */
 	if (var & STS_FE) {
 		ql_queue_asic_error(qdev);
-		netif_err(qdev, intr, qdev->ndev,
-			  "Got fatal error, STS = %x.\n", var);
+		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
 		var = ql_read32(qdev, ERR_STS);
-		netif_err(qdev, intr, qdev->ndev,
-			  "Resetting chip. Error Status Register = 0x%x\n", var);
+		netdev_err(qdev->ndev, "Resetting chip. "
+			"Error Status Register = 0x%x\n", var);
 		return IRQ_HANDLED;
 	}
 
@@ -2571,7 +2590,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 
 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
 
-	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb)) {
 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
@@ -3281,7 +3300,7 @@ msi:
  * will service it.  An example would be if there are
  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
  * This would mean that vector 0 would service RSS ring 0
- * and TX competion rings 0,1,2 and 3.  Vector 1 would
+ * and TX completion rings 0,1,2 and 3.  Vector 1 would
  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
  */
 static void ql_set_tx_vect(struct ql_adapter *qdev)
@@ -3530,12 +3549,13 @@ err_irq:
 
 static int ql_start_rss(struct ql_adapter *qdev)
 {
-	u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
-				0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
-				0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
-				0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
-				0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
-				0xbe, 0xac, 0x01, 0xfa};
+	static const u8 init_hash_seed[] = {
+		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+	};
 	struct ricb *ricb = &qdev->ricb;
 	int status = 0;
 	int i;
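
The static const change above is about storage, not values: a plain local array is rebuilt on the stack on every call, while a static const one lives in .rodata and is built once. A minimal illustration, with hypothetical names:

#include <linux/types.h>

static const u8 demo_key[] = { 0x6d, 0x5a, 0x56, 0xda };	/* .rodata, built once */

static void demo(void)
{
	u8 stack_key[] = { 0x6d, 0x5a, 0x56, 0xda };	/* copied in on every call */
	(void)stack_key;
	(void)demo_key;
}
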
@@ -3798,11 +3818,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 	end_jiffies = jiffies +
 		max((unsigned long)1, usecs_to_jiffies(30));
 
-	/* Stop management traffic. */
-	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+	/* Check if bit is set then skip the mailbox command and
+	 * clear the bit, else we are in normal reset process.
+	 */
+	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+		/* Stop management traffic. */
+		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
 
-	/* Wait for the NIC and MGMNT FIFOs to empty. */
-	ql_wait_fifo_empty(qdev);
+		/* Wait for the NIC and MGMNT FIFOs to empty. */
+		ql_wait_fifo_empty(qdev);
+	} else
+		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
 
 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
@@ -3826,7 +3852,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 
 static void ql_display_dev_info(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 
 	netif_info(qdev, probe, qdev->ndev,
 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -3841,7 +3867,7 @@ static void ql_display_dev_info(struct net_device *ndev)
3841 "MAC address %pM\n", ndev->dev_addr); 3867 "MAC address %pM\n", ndev->dev_addr);
3842} 3868}
3843 3869
3844int ql_wol(struct ql_adapter *qdev) 3870static int ql_wol(struct ql_adapter *qdev)
3845{ 3871{
3846 int status = 0; 3872 int status = 0;
3847 u32 wol = MB_WOL_DISABLE; 3873 u32 wol = MB_WOL_DISABLE;
@@ -3888,11 +3914,8 @@ int ql_wol(struct ql_adapter *qdev)
 	return status;
 }
 
-static int ql_adapter_down(struct ql_adapter *qdev)
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
 {
-	int i, status = 0;
-
-	ql_link_off(qdev);
 
 	/* Don't kill the reset worker thread if we
 	 * are in the process of recovery.
@@ -3904,6 +3927,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+
+	ql_link_off(qdev);
+
+	ql_cancel_all_work_sync(qdev);
 
 	for (i = 0; i < qdev->rss_ring_count; i++)
 		napi_disable(&qdev->rx_ring[i].napi);
@@ -3950,6 +3982,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
 	qlge_set_multicast_list(qdev->ndev);
 
+	/* Restore vlan setting. */
+	qlge_restore_vlan(qdev);
+
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	netif_tx_start_all_queues(qdev->ndev);
@@ -4124,7 +4159,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
 	int i, status;
 	u32 lbq_buf_len;
 
-	/* Wait for an oustanding reset to complete. */
+	/* Wait for an outstanding reset to complete. */
 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
 		int i = 3;
 		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4235,9 +4270,9 @@ static struct net_device_stats *qlge_get_stats(struct net_device
 	return &ndev->stats;
 }
 
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
 	int i, status;
 
@@ -4253,7 +4288,7 @@ void qlge_set_multicast_list(struct net_device *ndev)
 		if (ql_set_routing_reg
 			(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
 			netif_err(qdev, hw, qdev->ndev,
-				  "Failed to set promiscous mode.\n");
+				  "Failed to set promiscuous mode.\n");
 		} else {
 			set_bit(QL_PROMISCUOUS, &qdev->flags);
 		}
@@ -4263,7 +4298,7 @@ void qlge_set_multicast_list(struct net_device *ndev)
 		if (ql_set_routing_reg
 			(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
 			netif_err(qdev, hw, qdev->ndev,
-				  "Failed to clear promiscous mode.\n");
+				  "Failed to clear promiscuous mode.\n");
 		} else {
 			clear_bit(QL_PROMISCUOUS, &qdev->flags);
 		}
@@ -4327,7 +4362,7 @@ exit:
 
 static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	struct sockaddr *addr = p;
 	int status;
 
@@ -4350,7 +4385,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 
 static void qlge_tx_timeout(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	ql_queue_asic_error(qdev);
 }
 
@@ -4384,12 +4419,12 @@ error:
 	rtnl_unlock();
 }
 
-static struct nic_operations qla8012_nic_ops = {
+static const struct nic_operations qla8012_nic_ops = {
 	.get_flash		= ql_get_8012_flash_params,
 	.port_initialize	= ql_8012_port_initialize,
 };
 
-static struct nic_operations qla8000_nic_ops = {
+static const struct nic_operations qla8000_nic_ops = {
 	.get_flash		= ql_get_8000_flash_params,
 	.port_initialize	= ql_8000_port_initialize,
 };
@@ -4593,7 +4628,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	/*
 	 * Set up the operating parameters.
 	 */
-	qdev->rx_csum = 1;
 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
@@ -4602,6 +4636,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
 	init_completion(&qdev->ide_completion);
+	mutex_init(&qdev->mpi_mutex);
 
 	if (!cards_found) {
 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
@@ -4666,15 +4701,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
 
 	qdev = netdev_priv(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
-	ndev->features = (0
-		| NETIF_F_IP_CSUM
-		| NETIF_F_SG
-		| NETIF_F_TSO
-		| NETIF_F_TSO6
-		| NETIF_F_TSO_ECN
-		| NETIF_F_HW_VLAN_TX
-		| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
-	ndev->features |= NETIF_F_GRO;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+	ndev->features = ndev->hw_features |
+		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;
@@ -4726,6 +4757,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	del_timer_sync(&qdev->timer);
+	ql_cancel_all_work_sync(qdev);
 	unregister_netdev(ndev);
 	ql_release_all(pdev);
 	pci_disable_device(pdev);
@@ -4745,13 +4777,7 @@ static void ql_eeh_close(struct net_device *ndev)
 
 	/* Disabling the timer */
 	del_timer_sync(&qdev->timer);
-	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
-		cancel_delayed_work_sync(&qdev->asic_reset_work);
-	cancel_delayed_work_sync(&qdev->mpi_reset_work);
-	cancel_delayed_work_sync(&qdev->mpi_work);
-	cancel_delayed_work_sync(&qdev->mpi_idc_work);
-	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
-	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+	ql_cancel_all_work_sync(qdev);
 
 	for (i = 0; i < qdev->rss_ring_count; i++)
 		netif_napi_del(&qdev->rx_ring[i].napi);