author    David S. Miller <davem@davemloft.net>  2009-02-15 02:12:00 -0500
committer David S. Miller <davem@davemloft.net>  2009-02-15 02:12:00 -0500
commit    5e30589521518bff36fd2638b3c3d69679c50436 (patch)
tree      6ac985658a06b0787e4354d0d16d380ea9b16a5a /drivers/net/qlge
parent    ac178ef0ae9eb44fd527d87aa9b6394e05f56e1f (diff)
parent    d2f8d7ee1a9b4650b4e43325b321801264f7c37a (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-agn.c
	drivers/net/wireless/iwlwifi/iwl3945-base.c
Diffstat (limited to 'drivers/net/qlge')
 drivers/net/qlge/qlge_main.c | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 04bf2122264a..fd515afb1aa5 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				lbq_desc->index);
 			lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 			if (lbq_desc->p.lbq_page == NULL) {
+				rx_ring->lbq_clean_idx = clean_idx;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"Couldn't get a page.\n");
 				return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					   0, PAGE_SIZE,
 					   PCI_DMA_FROMDEVICE);
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				rx_ring->lbq_clean_idx = clean_idx;
+				put_page(lbq_desc->p.lbq_page);
+				lbq_desc->p.lbq_page = NULL;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"PCI mapping failed.\n");
 				return;
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				if (pci_dma_mapping_error(qdev->pdev, map)) {
 					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 					rx_ring->sbq_clean_idx = clean_idx;
+					dev_kfree_skb_any(sbq_desc->p.skb);
+					sbq_desc->p.skb = NULL;
 					return;
 				}
 				pci_unmap_addr_set(sbq_desc, mapaddr, map);
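
Both refill fixes above apply the same error-path pattern: record how far the refill actually got (lbq_clean_idx/sbq_clean_idx) so the next pass resumes at the right descriptor, and release whatever the failing iteration had already acquired so nothing leaks. Condensed from the large-buffer hunks, with context elided:

	lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
	if (lbq_desc->p.lbq_page == NULL) {
		rx_ring->lbq_clean_idx = clean_idx;	/* record progress */
		return;					/* nothing acquired yet */
	}
	map = pci_map_page(qdev->pdev, lbq_desc->p.lbq_page,
			   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		rx_ring->lbq_clean_idx = clean_idx;	/* record progress */
		put_page(lbq_desc->p.lbq_page);		/* undo the alloc_page() */
		lbq_desc->p.lbq_page = NULL;		/* no stale pointer left behind */
		return;
	}

The small-buffer hunk is the same shape, with dev_kfree_skb_any() releasing the skb. Before the patch, the large-buffer paths returned without publishing clean_idx and the failed-mapping path leaked the freshly allocated page; the small-buffer path leaked the freshly allocated skb.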
@@ -1450,12 +1456,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a VLAN packet upstream.\n");
-		vlan_hwaccel_rx(skb, qdev->vlgrp,
+		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
 				le16_to_cpu(ib_mac_rsp->vlan_id));
 	} else {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a normal packet upstream.\n");
-		netif_rx(skb);
+		netif_receive_skb(skb);
 	}
 }
 
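
The receive-path hunk above swaps the hard-irq delivery entry points for their softirq-context counterparts: netif_rx() queues the skb on a per-CPU backlog and raises a softirq, which is the right call from hard interrupt context, while netif_receive_skb() (and its VLAN-accelerated sibling vlan_hwaccel_receive_skb()) pushes the skb straight up the stack and is meant for NAPI-poll/softirq context, where qlge's rx completion runs. The direct call saves a queue/dequeue round trip per packet. A minimal sketch of the rule, not qlge code:

	#include <linux/netdevice.h>

	/* Sketch: pick the delivery call by calling context. */
	static void deliver_rx(struct sk_buff *skb, bool from_napi_poll)
	{
		if (from_napi_poll)
			netif_receive_skb(skb);	/* softirq context: deliver directly */
		else
			netif_rx(skb);		/* hard-irq context: defer via backlog */
	}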
@@ -1512,6 +1518,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 	netif_stop_queue(qdev->ndev);
 	netif_carrier_off(qdev->ndev);
 	ql_disable_interrupts(qdev);
+	/* Clear adapter up bit to signal the recovery
+	 * process that it shouldn't kill the reset worker
+	 * thread
+	 */
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
@@ -1928,10 +1939,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 	mac_iocb_ptr = tx_ring_desc->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-		return NETDEV_TX_BUSY;
-	}
 
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 	mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1957,6 +1964,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 		ql_hw_csum_setup(skb,
 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
 	}
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+			NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
 	tx_ring->prod_idx++;
 	if (tx_ring->prod_idx == tx_ring->wq_len)
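
The two qlge_send() hunks move the ql_map_send() call from before the TSO/checksum setup to after it, making the DMA mapping the last step before the descriptor is published. A likely motivation, not stated in the commit itself: the DMA API treats mapping as handing the buffer to the device, so any CPU writes into the packet (such as the header fixups done by ql_hw_csum_setup()) must happen before pci_map_single()/pci_map_page(); on non-cache-coherent platforms, writes made after the mapping may never be seen by the NIC. The general ordering rule as a fragment (build_tx_headers() is a hypothetical stand-in for the driver's header fixups):

	static int example_xmit(struct sk_buff *skb, struct pci_dev *pdev)
	{
		dma_addr_t map;

		/* 1. Finish every CPU write into the packet first ... */
		build_tx_headers(skb);		/* hypothetical helper */
		/* 2. ... then hand the buffer over to the device. */
		map = pci_map_single(pdev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, map))
			return NETDEV_TX_BUSY;	/* bail before any HW state changes */
		/* ... post the descriptor, advance the producer index ... */
		return NETDEV_TX_OK;
	}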
@@ -2874,8 +2887,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	/*
 	 * Fill out the Indirection Table.
 	 */
-	for (i = 0; i < 32; i++)
-		hash_id[i] = i & 1;
+	for (i = 0; i < 256; i++)
+		hash_id[i] = i & (qdev->rss_ring_count - 1);
 
 	/*
 	 * Random values for the IPv6 and IPv4 Hash Keys.
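
The old loop initialized only 32 indirection-table entries and bounced them between rings 0 and 1; the new loop fills all 256 entries and round-robins them across every RSS ring. The mask i & (count - 1) equals i % count provided rss_ring_count is a power of two, which the mask trick assumes. A stand-alone demonstration (RSS_RING_COUNT is a stand-in for qdev->rss_ring_count):

	#include <stdio.h>

	#define RSS_RING_COUNT 4	/* stand-in; must be a power of two */

	int main(void)
	{
		unsigned char hash_id[256];
		int i;

		/* Spread the 256 entries evenly across the rings:
		 * the mask cycles 0,1,2,3,0,1,2,3,... */
		for (i = 0; i < 256; i++)
			hash_id[i] = i & (RSS_RING_COUNT - 1);

		for (i = 0; i < 8; i++)
			printf("%u ", hash_id[i]);	/* prints: 0 1 2 3 0 1 2 3 */
		printf("\n");
		return 0;
	}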
@@ -3101,7 +3114,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	/* Don't kill the reset worker thread if we
+	 * are in the process of recovery.
+	 */
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_work);
 
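
Together with the ql_queue_asic_error() hunk above, this forms a small handshake around the asic reset worker. The reset worker itself tears the adapter down, so an unconditional cancel_delayed_work_sync(&qdev->asic_reset_work) inside ql_adapter_down() would, when reached from the worker, wait on the very work item it is running inside and risk deadlocking. Clearing QL_ADAPTER_UP before queueing the reset lets the recovery path skip the cancel, while a normal ifdown (bit still set) still cancels cleanly. Condensed from the two hunks, with surrounding code elided:

	void ql_queue_asic_error(struct ql_adapter *qdev)
	{
		/* ... stop queue, carrier off, disable interrupts ... */
		clear_bit(QL_ADAPTER_UP, &qdev->flags);	/* recovery owns the worker now */
		queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
	}

	static int ql_adapter_down(struct ql_adapter *qdev)
	{
		/* ... */
		if (test_bit(QL_ADAPTER_UP, &qdev->flags))	/* normal ifdown only */
			cancel_delayed_work_sync(&qdev->asic_reset_work);
		/* ... */
	}

The tx-timeout hunk below routes qlge_tx_timeout() through ql_queue_asic_error() as well, so a hung tx queue takes this same recovery path instead of queueing the reset worker directly.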
@@ -3502,7 +3519,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 static void qlge_tx_timeout(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+	ql_queue_asic_error(qdev);
 }
 
 static void ql_asic_reset_work(struct work_struct *work)