author		Jing Huang <huangj@Brocade.COM>		2012-04-04 01:43:18 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-04 18:19:17 -0400
commit		01b54b1451853593739816a392485c4e2bee7dda
tree		5b9d69d6da18ee0dc770f48e01e9139530743534
parent		f96c1d24be47afcdc6376b03fe8f44a5250a202b
bna: tx rx cleanup fix
This patch removes the busy wait in tx/rx cleanup. bnad_cb_tx_cleanup() and
bnad_cb_rx_cleanup() are called from irq context, and currently they busy
wait for the in-flight transmit or the currently executing NAPI polling
routine to complete. To fix the issue, we create a workqueue to defer tx and
rx cleanup processing, and in the tx/rx cleanup handler we wait for the
respective in-flight processing to complete before freeing the buffers.
Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
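
The fix is the standard pattern of deferring cleanup that must sleep out of
irq context and into process context via a workqueue. A minimal,
self-contained sketch of that pattern follows; all demo_* names are
hypothetical illustrations, not the driver's own code:

  #include <linux/errno.h>
  #include <linux/spinlock.h>
  #include <linux/workqueue.h>

  /* Hypothetical per-device state, mirroring bnad's work_q/cleanup_work. */
  struct demo_dev {
  	struct workqueue_struct *work_q;	/* single-threaded workqueue */
  	struct work_struct cleanup_work;
  	spinlock_t lock;
  };

  /* Worker: runs in process context, so it may sleep while waiting for
   * in-flight processing to finish before freeing buffers. */
  static void demo_cleanup_fn(struct work_struct *work)
  {
  	struct demo_dev *dev =
  		container_of(work, struct demo_dev, cleanup_work);
  	unsigned long flags;

  	/* ... wait for in-flight tx/napi processing, free buffers ... */

  	spin_lock_irqsave(&dev->lock, flags);
  	/* ... notify the state machine that cleanup is complete ... */
  	spin_unlock_irqrestore(&dev->lock, flags);
  }

  /* Callback invoked from irq context: must not block, so it only
   * queues the deferred cleanup and returns immediately. */
  static void demo_cleanup_cb(struct demo_dev *dev)
  {
  	queue_work(dev->work_q, &dev->cleanup_work);
  }

  static int demo_init(struct demo_dev *dev)
  {
  	spin_lock_init(&dev->lock);
  	INIT_WORK(&dev->cleanup_work, demo_cleanup_fn);
  	dev->work_q = create_singlethread_workqueue("demo_wq");
  	return dev->work_q ? 0 : -ENOMEM;
  }

The patch below applies exactly this shape: bnad_cb_tx_cleanup() and
bnad_cb_rx_cleanup() now only queue work on bnad->work_q, while the new
bnad_tx_cleanup()/bnad_rx_cleanup() workers do the waiting and freeing.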
 drivers/net/ethernet/brocade/bna/bnad.c | 201 ++++++++++++++++------------
 drivers/net/ethernet/brocade/bna/bnad.h |   4 +
 2 files changed, 125 insertions(+), 80 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f770dec9..032a306c0569 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@ do { \
 		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
 } while (0)
 
-#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
-
 static void
 bnad_add_to_list(struct bnad *bnad)
 {
@@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
 
 	for (j = 0; j < frag; j++) {
 		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
-			skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
+				skb_frag_size(&skb_shinfo(skb)->frags[j]),
+				DMA_TO_DEVICE);
 		dma_unmap_addr_set(&array[index], dma_addr, 0);
 		BNA_QE_INDX_ADD(index, 1, depth);
 	}
@@ -453,12 +452,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 
-	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
-
-	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
-		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
 		return 0;
-	}
 
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +528,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 
 		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 			napi_gro_receive(&rx_ctrl->napi, skb);
-		else {
+		else
 			netif_receive_skb(skb);
-		}
 
 next:
 		cmpl->valid = 0;
@@ -839,20 +833,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
-	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
-
-	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		cpu_relax();
-
-	bnad_free_all_txbufs(bnad, tcb);
-
-	unmap_q->producer_index = 0;
-	unmap_q->consumer_index = 0;
-
-	smp_mb__before_clear_bit();
-	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
+	tcb->priv = NULL;
 }
 
 static void
@@ -866,12 +849,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
-bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
-{
-	bnad_free_all_rxbufs(bnad, rcb);
-}
-
-static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -916,7 +893,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 {
 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
 	struct bna_tcb *tcb;
-	struct bnad_unmap_q *unmap_q;
 	u32 txq_id;
 	int i;
 
@@ -926,23 +902,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 			continue;
 		txq_id = tcb->id;
 
-		unmap_q = tcb->unmap_q;
-
-		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-			continue;
-
-		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-			cpu_relax();
-
-		bnad_free_all_txbufs(bnad, tcb);
-
-		unmap_q->producer_index = 0;
-		unmap_q->consumer_index = 0;
-
-		smp_mb__before_clear_bit();
-		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-
+		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+		BUG_ON(*(tcb->hw_consumer_index) != 0);
 
 		if (netif_carrier_ok(bnad->netdev)) {
 			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +925,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 	}
 }
 
+/*
+ * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
+ */
+static void
+bnad_tx_cleanup(struct delayed_work *work)
+{
+	struct bnad_tx_info *tx_info =
+		container_of(work, struct bnad_tx_info, tx_cleanup_work);
+	struct bnad *bnad = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct bna_tcb *tcb;
+	unsigned long flags;
+	uint32_t i, pending = 0;
+
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+
+		bnad = tcb->bnad;
+
+		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+			pending++;
+			continue;
+		}
+
+		bnad_free_all_txbufs(bnad, tcb);
+
+		unmap_q = tcb->unmap_q;
+		unmap_q->producer_index = 0;
+		unmap_q->consumer_index = 0;
+
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+	}
+
+	if (pending) {
+		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
+			msecs_to_jiffies(1));
+		return;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_tx_cleanup_complete(tx_info->tx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+
 static void
 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
@@ -976,8 +986,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 			continue;
 		}
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-	bna_tx_cleanup_complete(tx);
+	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
 }
 
 static void
@@ -1001,6 +1010,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
 	}
 }
 
+/*
+ * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
+ */
+static void
+bnad_rx_cleanup(void *work)
+{
+	struct bnad_rx_info *rx_info =
+		container_of(work, struct bnad_rx_info, rx_cleanup_work);
+	struct bnad_rx_ctrl *rx_ctrl;
+	struct bnad *bnad = NULL;
+	unsigned long flags;
+	uint32_t i;
+
+	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+		rx_ctrl = &rx_info->rx_ctrl[i];
+
+		if (!rx_ctrl->ccb)
+			continue;
+
+		bnad = rx_ctrl->ccb->bnad;
+
+		/*
+		 * Wait till the poll handler has exited
+		 * and nothing can be scheduled anymore
+		 */
+		napi_disable(&rx_ctrl->napi);
+
+		bnad_cq_cmpl_init(bnad, rx_ctrl->ccb);
+		bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[0]);
+		if (rx_ctrl->ccb->rcb[1])
+			bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[1]);
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_rx_cleanup_complete(rx_info->rx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
@@ -1009,8 +1056,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-
 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
 		rx_ctrl = &rx_info->rx_ctrl[i];
 		ccb = rx_ctrl->ccb;
@@ -1021,12 +1066,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 
 		if (ccb->rcb[1])
 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-
-		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
-			cpu_relax();
 	}
 
-	bna_rx_cleanup_complete(rx);
+	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
 }
 
 static void
@@ -1046,13 +1088,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
 		if (!ccb)
 			continue;
 
-		bnad_cq_cmpl_init(bnad, ccb);
+		napi_enable(&rx_ctrl->napi);
 
 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
 			rcb = ccb->rcb[j];
 			if (!rcb)
 				continue;
-			bnad_free_all_rxbufs(bnad, rcb);
 
 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1704,7 +1745,7 @@ poll_exit:
 
 #define BNAD_NAPI_POLL_QUOTA 64
 static void
-bnad_napi_init(struct bnad *bnad, u32 rx_id)
+bnad_napi_add(struct bnad *bnad, u32 rx_id)
 {
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
@@ -1718,29 +1759,13 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id)
 }
 
 static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
-{
-	struct bnad_rx_ctrl *rx_ctrl;
-	int i;
-
-	/* Initialize & enable NAPI */
-	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
-		napi_enable(&rx_ctrl->napi);
-	}
-}
-
-static void
-bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+bnad_napi_delete(struct bnad *bnad, u32 rx_id)
 {
 	int i;
 
 	/* First disable and then clean up */
-	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+	for (i = 0; i < bnad->num_rxp_per_rx; i++)
 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
-	}
 }
 
 /* Should be held with conf_lock held */
@@ -1832,6 +1857,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 		goto err_return;
 	tx_info->tx = tx;
 
+	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
+			(work_func_t)bnad_tx_cleanup);
+
 	/* Register ISR for the Tx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
 		err = bnad_tx_msix_register(bnad, tx_info,
@@ -1928,7 +1956,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
 
-	bnad_napi_disable(bnad, rx_id);
+	bnad_napi_delete(bnad, rx_id);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1980,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
 	static const struct bna_rx_event_cbfn rx_cbfn = {
 		.rcb_setup_cbfn = bnad_cb_rcb_setup,
-		.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+		.rcb_destroy_cbfn = NULL,
 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
 		.rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +2026,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 	rx_info->rx = rx;
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	INIT_WORK(&rx_info->rx_cleanup_work,
+			(work_func_t)(bnad_rx_cleanup));
+
 	/*
 	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
 	 * so that IRQ handler cannot schedule NAPI at this point.
 	 */
-	bnad_napi_init(bnad, rx_id);
+	bnad_napi_add(bnad, rx_id);
 
 	/* Register ISR for the Rx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,9 +2059,6 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 	bna_rx_enable(rx);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* Enable scheduling of NAPI */
-	bnad_napi_enable(bnad, rx_id);
-
 	return 0;
 
 err_return:
@@ -3129,6 +3157,7 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
  * 2. Setup netdev pointer in pci_dev
  * 3. Initialze Tx free tasklet
  * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ * 5. Initialize work queue.
  */
 static int
 bnad_init(struct bnad *bnad,
@@ -3174,6 +3203,12 @@ bnad_init(struct bnad *bnad,
 	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
 		     (unsigned long)bnad);
 
+	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
+	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
+
+	if (!bnad->work_q)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -3185,6 +3220,12 @@ bnad_init(struct bnad *bnad,
 static void
 bnad_uninit(struct bnad *bnad)
 {
+	if (bnad->work_q) {
+		flush_workqueue(bnad->work_q);
+		destroy_workqueue(bnad->work_q);
+		bnad->work_q = NULL;
+	}
+
 	if (bnad->bar0)
 		iounmap(bnad->bar0);
 	pci_set_drvdata(bnad->pcidev, NULL);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d92699f..ff129aa7cb66 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -210,6 +210,7 @@ struct bnad_tx_info {
 	struct bna_tx *tx; /* 1:1 between tx_info & tx */
 	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
 	u32 tx_id;
+	struct delayed_work tx_cleanup_work;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
@@ -217,6 +218,7 @@ struct bnad_rx_info {
 
 	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
 	u32 rx_id;
+	struct work_struct rx_cleanup_work;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
@@ -319,6 +321,7 @@ struct bnad {
 	mac_t perm_addr;
 
 	struct tasklet_struct tx_free_tasklet;
+	struct workqueue_struct *work_q;
 
 	/* Statistics */
 	struct bnad_stats stats;
@@ -328,6 +331,7 @@ struct bnad {
 	char adapter_name[BNAD_NAME_LEN];
 	char port_name[BNAD_NAME_LEN];
 	char mbox_irq_name[BNAD_NAME_LEN];
+	char wq_name[BNAD_NAME_LEN];
 
 	/* debugfs specific data */
 	char *regdata;