Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r--  drivers/net/qlge/qlge_main.c  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3d1d7b6e55aa..8ea72dc60f79 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					lbq_desc->index);
 			lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 			if (lbq_desc->p.lbq_page == NULL) {
+				rx_ring->lbq_clean_idx = clean_idx;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"Couldn't get a page.\n");
 				return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					   0, PAGE_SIZE,
 					   PCI_DMA_FROMDEVICE);
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				rx_ring->lbq_clean_idx = clean_idx;
+				put_page(lbq_desc->p.lbq_page);
+				lbq_desc->p.lbq_page = NULL;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"PCI mapping failed.\n");
 				return;
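The two hunks above tighten the error paths in the large-buffer (lbq) refill loop: lbq_clean_idx is restored before bailing out so the next refill resumes from a consistent index, and on a DMA mapping failure the freshly allocated page is released instead of leaked. A standalone sketch of that rollback pattern, with hypothetical names (refill, map_buf, and malloc stand in for the driver's alloc_page/pci_map_page path):

    /* Sketch only: rollback-on-failure in a refill loop, mirroring the
     * lbq hunks above. All names here are hypothetical stand-ins.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct ring { int clean_idx; };

    static int map_buf(void *buf) { return buf == NULL; } /* fake DMA map */

    static int refill(struct ring *r, int count)
    {
            int clean_idx = r->clean_idx;

            while (count--) {
                    void *buf = malloc(4096);  /* stands in for alloc_page() */
                    if (!buf) {
                            r->clean_idx = clean_idx; /* publish last good index */
                            return -1;
                    }
                    if (map_buf(buf)) {
                            r->clean_idx = clean_idx; /* same rollback ... */
                            free(buf);                /* ... plus release the buffer */
                            return -1;
                    }
                    free(buf);                        /* demo cleanup only */
                    clean_idx++;
            }
            r->clean_idx = clean_idx;
            return 0;
    }

    int main(void)
    {
            struct ring r = { 0 };
            printf("refill=%d clean_idx=%d\n", refill(&r, 16), r.clean_idx);
            return 0;
    }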
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
 				QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 				rx_ring->sbq_clean_idx = clean_idx;
+				dev_kfree_skb_any(sbq_desc->p.skb);
+				sbq_desc->p.skb = NULL;
 				return;
 			}
 			pci_unmap_addr_set(sbq_desc, mapaddr, map);
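The sbq hunk is the small-buffer twin of the same leak: sbq_clean_idx was already being restored on this path, but the skb that failed to map was left dangling. It is now freed with dev_kfree_skb_any(), which is safe in both interrupt and process context, and the pointer is cleared so the descriptor cannot be unmapped or freed again later.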
@@ -1449,12 +1455,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a VLAN packet upstream.\n");
-		vlan_hwaccel_rx(skb, qdev->vlgrp,
+		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
 				le16_to_cpu(ib_mac_rsp->vlan_id));
 	} else {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a normal packet upstream.\n");
-		netif_rx(skb);
+		netif_receive_skb(skb);
 	}
 }
 
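Both receive paths switch from the softirq-queueing helpers (netif_rx(), vlan_hwaccel_rx()) to their direct counterparts (netif_receive_skb(), vlan_hwaccel_receive_skb()). The direct variants are intended for code already running in NAPI poll context, as this RX completion handler is, and hand the skb straight to the protocol layers instead of re-queueing it through the per-CPU backlog.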
@@ -1511,6 +1517,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 	netif_stop_queue(qdev->ndev);
 	netif_carrier_off(qdev->ndev);
 	ql_disable_interrupts(qdev);
+	/* Clear adapter up bit to signal the recovery
+	 * process that it shouldn't kill the reset worker
+	 * thread
+	 */
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
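Clearing QL_ADAPTER_UP here pairs with the test_bit() guard added to ql_adapter_down() further down: it marks the adapter as already in error recovery before the reset worker is queued.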
@@ -1927,10 +1938,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 	mac_iocb_ptr = tx_ring_desc->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-		return NETDEV_TX_BUSY;
-	}
 
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 	mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1956,6 +1963,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 		ql_hw_csum_setup(skb,
 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
 	}
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+			NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
 	tx_ring->prod_idx++;
 	if (tx_ring->prod_idx == tx_ring->wq_len)
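These two qlge_send() hunks move the ql_map_send() call from just after the IOCB memset to the last step before the descriptor is handed to hardware. With the DMA mapping done after the TSO/checksum setup, no failure path remains between a successful mapping and the doorbell, so nothing can bail out of the send path and leave skb fragments mapped.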
@@ -2873,8 +2886,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	/*
 	 * Fill out the Indirection Table.
 	 */
-	for (i = 0; i < 32; i++)
-		hash_id[i] = i & 1;
+	for (i = 0; i < 256; i++)
+		hash_id[i] = i & (qdev->rss_ring_count - 1);
 
 	/*
 	 * Random values for the IPv6 and IPv4 Hash Keys.
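The indirection table now fills all 256 entries and masks with rss_ring_count - 1 instead of hard-coding an alternation between two rings (i & 1), spreading RSS hash buckets evenly across every RX ring. The masking trick assumes the ring count is a power of two. A standalone sketch of the resulting distribution (rss_ring_count = 4 is a hypothetical value, not taken from the driver):

    /* Sketch only: how "i & (count - 1)" spreads 256 indirection-table
     * slots across RSS rings when count is a power of two.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned char hash_id[256];
            unsigned int rss_ring_count = 4;   /* hypothetical ring count */
            unsigned int hits[4] = { 0 };
            unsigned int i;

            for (i = 0; i < 256; i++) {
                    hash_id[i] = i & (rss_ring_count - 1);
                    hits[hash_id[i]]++;
            }
            for (i = 0; i < rss_ring_count; i++)
                    printf("ring %u: %u slots\n", i, hits[i]); /* 64 each */
            return 0;
    }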
@@ -3100,7 +3113,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	/* Don't kill the reset worker thread if we
+	 * are in the process of recovery.
+	 */
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_work);
 
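This guard completes the handshake started in ql_queue_asic_error(): when ql_adapter_down() runs as part of error recovery, QL_ADAPTER_UP has already been cleared, so the function skips cancel_delayed_work_sync() on asic_reset_work rather than waiting on, or cancelling, the very reset worker that invoked it.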
@@ -3501,7 +3518,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 static void qlge_tx_timeout(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+	ql_queue_asic_error(qdev);
 }
 
 static void ql_asic_reset_work(struct work_struct *work)
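Finally, the TX timeout handler now funnels through ql_queue_asic_error() instead of queueing asic_reset_work directly, so a hung queue gets the full error treatment shown earlier: queue stopped, carrier off, interrupts disabled, and QL_ADAPTER_UP cleared before the reset worker runs.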