author     James Morris <jmorris@namei.org>   2009-03-23 19:52:46 -0400
committer  James Morris <jmorris@namei.org>   2009-03-23 19:52:46 -0400
commit     703a3cd72817e99201cef84a8a7aecc60b2b3581 (patch)
tree       3e943755178ff410694722bb031f523136fbc432 /drivers/net/qlge/qlge_main.c
parent     df7f54c012b92ec93d56b68547351dcdf8a163d3 (diff)
parent     8e0ee43bc2c3e19db56a4adaa9a9b04ce885cd84 (diff)
Merge branch 'master' into next
Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
 -rw-r--r--  drivers/net/qlge/qlge_main.c | 94
 1 file changed, 65 insertions(+), 29 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3d1d7b6e55a..91191f761fb 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					lbq_desc->index);
 			lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 			if (lbq_desc->p.lbq_page == NULL) {
+				rx_ring->lbq_clean_idx = clean_idx;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"Couldn't get a page.\n");
 				return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					   0, PAGE_SIZE,
 					   PCI_DMA_FROMDEVICE);
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				rx_ring->lbq_clean_idx = clean_idx;
+				put_page(lbq_desc->p.lbq_page);
+				lbq_desc->p.lbq_page = NULL;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"PCI mapping failed.\n");
 				return;
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
 				QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 				rx_ring->sbq_clean_idx = clean_idx;
+				dev_kfree_skb_any(sbq_desc->p.skb);
+				sbq_desc->p.skb = NULL;
 				return;
 			}
 			pci_unmap_addr_set(sbq_desc, mapaddr, map);
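The three hunks above apply one error-path pattern to both buffer-queue refill routines: publish the index of the last fully built buffer back into the ring, and undo any half-built buffer, before bailing out, so the next refill attempt resumes cleanly instead of reusing a stale index. A minimal user-space sketch of that pattern; the ring/buffer types and helpers are hypothetical stand-ins for the driver's alloc_page()/pci_map_page() calls:

#include <stdlib.h>

/* Hypothetical stand-ins for the driver's ring and buffer types. */
struct buf  { void *page; };
struct ring { struct buf bufs[128]; int clean_idx; int free_cnt; };

static int  alloc_buf(struct buf *b) { b->page = malloc(4096); return b->page == NULL; }
static int  map_buf(struct buf *b)   { (void)b; return 0; /* pretend mapping succeeds */ }
static void free_buf(struct buf *b)  { free(b->page); b->page = NULL; }

/* On failure: undo partial work, store the last good index back into
 * the ring, then return -- the pattern the hunks above introduce. */
static void refill_ring(struct ring *ring)
{
    int clean_idx = ring->clean_idx;

    while (ring->free_cnt > 0) {
        struct buf *b = &ring->bufs[clean_idx];

        if (alloc_buf(b)) {
            ring->clean_idx = clean_idx;     /* publish progress first */
            return;
        }
        if (map_buf(b)) {
            ring->clean_idx = clean_idx;
            free_buf(b);                     /* undo the partial buffer */
            return;
        }
        clean_idx = (clean_idx + 1) % 128;
        ring->free_cnt--;
    }
    ring->clean_idx = clean_idx;
}

int main(void)
{
    struct ring r = { .clean_idx = 0, .free_cnt = 16 };
    refill_ring(&r);
    return 0;
}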
@@ -1430,18 +1436,32 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
 	}
-	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
-		QPRINTK(qdev, RX_STATUS, ERR,
-			"Bad checksum for this %s packet.\n",
-			((ib_mac_rsp->
-			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
-		skb->ip_summed = CHECKSUM_NONE;
-	} else if (qdev->rx_csum &&
-		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
-		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
-		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if (qdev->rx_csum &&
+		!(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"TCP checksum done!\n");
+			}
+		}
 	}
 	qdev->stats.rx_packets++;
 	qdev->stats.rx_bytes += skb->len;
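The rewritten receive path only trusts the hardware checksum for TCP, or for UDP when the frame is a whole IPv4 datagram: a fragment's UDP checksum covers the entire reassembled payload, so it cannot be validated per frame. (Note that the committed debug string in the UDP branch still reads "TCP checksum done!".) A standalone sketch of the unfragmented test, with IP_MF/IP_OFFSET defined locally to the same values <net/ip.h> uses:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP_MF     0x2000   /* "more fragments" flag, as in <net/ip.h> */
#define IP_OFFSET 0x1FFF   /* fragment offset mask */

/* frag_off arrives big-endian on the wire, so the masks are byte-swapped
 * before the AND -- mirroring the cpu_to_be16() in the patch. */
static int ipv4_unfragmented(uint16_t frag_off_be)
{
    return !(frag_off_be & htons(IP_MF | IP_OFFSET));
}

int main(void)
{
    printf("%d\n", ipv4_unfragmented(htons(0)));      /* 1: whole datagram  */
    printf("%d\n", ipv4_unfragmented(htons(IP_MF)));  /* 0: first fragment  */
    printf("%d\n", ipv4_unfragmented(htons(100)));    /* 0: later fragment  */
    return 0;
}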
@@ -1449,12 +1469,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a VLAN packet upstream.\n");
-		vlan_hwaccel_rx(skb, qdev->vlgrp,
+		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
 				le16_to_cpu(ib_mac_rsp->vlan_id));
 	} else {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
-		netif_rx(skb);
+		netif_receive_skb(skb);
 	}
 }
 
@@ -1511,6 +1531,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 	netif_stop_queue(qdev->ndev);
 	netif_carrier_off(qdev->ndev);
 	ql_disable_interrupts(qdev);
+	/* Clear adapter up bit to signal the recovery
+	 * process that it shouldn't kill the reset worker
+	 * thread
+	 */
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
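This pairs with the ql_adapter_down() hunk further below: the error path clears QL_ADAPTER_UP before queuing the reset worker, and teardown cancels that worker only while the bit is still set, so a recovery-initiated teardown never cancels the very worker driving it. A rough user-space model of the handshake, with C11 atomics standing in for the kernel's bitops and all names illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool adapter_up = true;

static void queue_asic_error(void)
{
    atomic_store(&adapter_up, false);  /* signal: recovery owns the worker */
    /* queue_delayed_work(... asic_reset_work ...) would follow here */
}

static void adapter_down(void)
{
    if (atomic_load(&adapter_up))
        puts("normal shutdown: cancel the reset worker");
    else
        puts("recovery in progress: leave the reset worker alone");
    /* ... rest of the teardown ... */
}

int main(void)
{
    queue_asic_error();  /* ASIC error path runs first ...        */
    adapter_down();      /* ... then recovery tears the port down */
    return 0;
}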
@@ -1916,6 +1941,9 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 
 	tx_ring = &qdev->tx_ring[tx_ring_idx];
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
 		QPRINTK(qdev, TX_QUEUED, INFO,
 			"%s: shutting down tx queue %d du to lack of resources.\n",
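skb_padto() zero-extends frames shorter than ETH_ZLEN (60 bytes, the minimum Ethernet frame length excluding the FCS) and frees the skb itself on failure, which is why the early return reports NETDEV_TX_OK rather than an error. A user-space sketch of the padding step, on the assumption that this controller does not pad runt frames itself:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60  /* minimum Ethernet frame length, excluding FCS */

/* Pad a short frame with zeroes up to ETH_ZLEN, the way skb_padto()
 * does before the descriptor reaches the hardware. Returns the new
 * length, or 0 if the buffer cannot hold the padded frame (the caller
 * must then drop it, as the patch does). */
static size_t pad_to_min(unsigned char *buf, size_t len, size_t cap)
{
    if (len >= ETH_ZLEN)
        return len;
    if (cap < ETH_ZLEN)
        return 0;
    memset(buf + len, 0, ETH_ZLEN - len);
    return ETH_ZLEN;
}

int main(void)
{
    unsigned char frame[128] = "short arp frame";
    printf("padded length: %zu\n", pad_to_min(frame, 15, sizeof(frame)));
    return 0;
}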
@@ -1927,10 +1955,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 	mac_iocb_ptr = tx_ring_desc->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-		return NETDEV_TX_BUSY;
-	}
 
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 	mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1956,6 +1980,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 		ql_hw_csum_setup(skb,
 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
 	}
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+			NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
 	tx_ring->prod_idx++;
 	if (tx_ring->prod_idx == tx_ring->wq_len)
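These two hunks move ql_map_send() from before the descriptor setup to after it, so the DMA mapping becomes the last fallible step; presumably this is so no later failure (and nothing touched by the skb_padto() added above) has to unwind live mappings, and the error path can stay a bare return. A sketch of that ordering, with hypothetical helpers modelling the qlge_send() flow:

#include <stdio.h>

/* Hypothetical descriptor and helpers; none of this is the driver API. */
struct frame { int opcode; int mapped; };

static void build_descriptor(struct frame *f)    { f->opcode = 1; } /* can't fail */
static void fill_offload_fields(struct frame *f) { (void)f; }       /* can't fail */
static int  map_for_dma(struct frame *f)         { f->mapped = 1; return 0; }

/* Acquire the DMA mapping last, after every infallible setup step,
 * so a mapping failure leaves nothing to undo. */
static int send_frame(struct frame *f)
{
    build_descriptor(f);
    fill_offload_fields(f);
    if (map_for_dma(f))
        return -1;          /* nothing to unwind */
    puts("ring doorbell");
    return 0;
}

int main(void)
{
    struct frame f = { 0 };
    return send_frame(&f);
}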
@@ -2873,8 +2903,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	/*
 	 * Fill out the Indirection Table.
 	 */
-	for (i = 0; i < 32; i++)
-		hash_id[i] = i & 1;
+	for (i = 0; i < 256; i++)
+		hash_id[i] = i & (qdev->rss_ring_count - 1);
 
 	/*
 	 * Random values for the IPv6 and IPv4 Hash Keys.
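The indirection table grows from 32 to 256 entries and now spreads buckets across all RSS rings instead of alternating between two. The bitwise AND only distributes evenly if rss_ring_count is a power of two, which this configuration presumably guarantees. A standalone sketch of the fill:

#include <stdio.h>

int main(void)
{
    unsigned char hash_id[256];
    int rss_ring_count = 4;   /* hypothetical power-of-two ring count */
    int i;

    /* Same fill as the patch: entry i steers hash bucket i to
     * ring (i mod rss_ring_count), via a power-of-two mask. */
    for (i = 0; i < 256; i++)
        hash_id[i] = i & (rss_ring_count - 1);

    for (i = 0; i < 8; i++)
        printf("bucket %d -> ring %d\n", i, hash_id[i]);
    return 0;
}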
@@ -2957,9 +2987,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 	mask = value << 16;
 	ql_write32(qdev, SYS, mask | value);
 
-	/* Set the default queue. */
-	value = NIC_RCV_CFG_DFQ;
-	mask = NIC_RCV_CFG_DFQ_MASK;
+	/* Set the default queue, and VLAN behavior. */
+	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
 	/* Set the MPI interrupt to enabled. */
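Judging by the "mask = value << 16" idiom here and in the SYS write just above, these registers appear to treat the upper 16 bits of the written word as a write-enable mask for the lower 16 data bits, so setting NIC_RCV_CFG_RV leaves the register's other fields untouched. A plain-C model of that apparent convention (an inference from the idiom, not a documented fact):

#include <stdio.h>
#include <stdint.h>

/* Only bits enabled in the upper-half mask are written; unmasked
 * fields keep their previous value. */
static uint16_t reg_write_masked(uint16_t reg, uint32_t word)
{
    uint16_t mask  = word >> 16;
    uint16_t value = word & 0xffff;
    return (reg & (uint16_t)~mask) | (value & mask);
}

int main(void)
{
    uint16_t reg = 0x00f0;                                   /* existing fields */
    reg = reg_write_masked(reg, (0x0100u << 16) | 0x0100u);  /* set one bit */
    printf("0x%04x\n", reg);                                 /* 0x01f0: rest kept */
    return 0;
}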
@@ -3100,7 +3130,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	/* Don't kill the reset worker thread if we
+	 * are in the process of recovery.
+	 */
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_work);
 
@@ -3132,6 +3166,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
 	ql_tx_ring_clean(qdev);
 
+	/* Call netif_napi_del() from common point.
+	 */
+	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+		netif_napi_del(&qdev->rx_ring[i].napi);
+
 	spin_lock(&qdev->hw_lock);
 	status = ql_adapter_reset(qdev);
 	if (status)
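Deleting the per-ring NAPI contexts here, together with dropping the duplicate loop from qlge_suspend() in the last two hunks, gives netif_napi_del() a single home on the path every shutdown route (close, suspend, reset recovery) funnels through, keeping it paired one-to-one with the netif_napi_add() done at bring-up. A trivial illustration of that pairing, with printouts standing in for the NAPI calls:

#include <stdio.h>

#define NRINGS 4

static void adapter_up(void)
{
    for (int i = 0; i < NRINGS; i++)
        printf("netif_napi_add(ring %d)\n", i);
}

static void adapter_down(void)
{
    /* common point: close, suspend and reset recovery all land here */
    for (int i = 0; i < NRINGS; i++)
        printf("netif_napi_del(ring %d)\n", i);
}

int main(void)
{
    adapter_up();
    adapter_down();
    return 0;
}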
@@ -3501,7 +3540,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 static void qlge_tx_timeout(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+	ql_queue_asic_error(qdev);
 }
 
 static void ql_asic_reset_work(struct work_struct *work)
@@ -3836,7 +3875,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	int err, i;
+	int err;
 
 	netif_device_detach(ndev);
 
@@ -3846,9 +3885,6 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 		return err;
 	}
 
-	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
-		netif_napi_del(&qdev->rx_ring[i].napi);
-
 	err = pci_save_state(pdev);
 	if (err)
 		return err;