path: root/drivers
author     Jon Mason <jon.mason@exar.com>          2010-07-15 04:47:25 -0400
committer  David S. Miller <davem@davemloft.net>   2010-07-15 23:46:22 -0400
commit     98f45da247c5b8023d4f3677d65f21b64692f543 (patch)
tree       c42c4fb4a38469888f49f8448d6bad0dbc677ed6 /drivers
parent     7adf7d1b0d50075e252aa82505fb473af38c3f20 (diff)
vxge: NETIF_F_LLTX removal
NETIF_F_LLTX and its usage of local transmit locks are deprecated in favor of using the netdev queue's transmit lock. Remove the local lock and all references to it, and use the netdev queue transmit lock in the transmit completion handler.

Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
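For reference, the locking pattern this patch converts to can be sketched in isolation. The fragment below is illustrative only — example_fifo and example_complete_tx are made-up names, not vxge code — and shows the Tx completion path serializing against ndo_start_xmit() through the netdev queue's own transmit lock via __netif_tx_trylock()/__netif_tx_unlock(), instead of a driver-private spinlock that NETIF_F_LLTX required.

#include <linux/netdevice.h>

/* Hypothetical per-fifo state; the real driver caches the queue in
 * struct vxge_fifo as "struct netdev_queue *txq". */
struct example_fifo {
        struct netdev_queue *txq;       /* from netdev_get_tx_queue() */
};

static void example_complete_tx(struct example_fifo *fifo)
{
        /* Take the queue's xmit lock; if the xmit path currently holds
         * it, skip this pass (trylock), just as the old LLTX code did
         * with its private lock. */
        if (__netif_tx_trylock(fifo->txq)) {
                /* ... reap completed descriptors and free skbs ... */
                __netif_tx_unlock(fifo->txq);
        }

        /* Restart the queue if the xmit path stopped it when it ran
         * out of descriptors. */
        if (netif_tx_queue_stopped(fifo->txq))
                netif_tx_wake_queue(fifo->txq);
}

The trylock keeps the completion handler from spinning against a concurrent xmit; any completions skipped here are simply picked up on the next poll.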
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/vxge/vxge-main.c     149
-rw-r--r--  drivers/net/vxge/vxge-main.h      15
-rw-r--r--  drivers/net/vxge/vxge-traffic.c    4
3 files changed, 39 insertions, 129 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 66d914c1ccb..48f17321a66 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -87,7 +87,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
 
 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
 {
-        unsigned long flags = 0;
         struct sk_buff **skb_ptr = NULL;
         struct sk_buff **temp;
 #define NR_SKB_COMPLETED 128
@@ -98,15 +97,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
                 more = 0;
                 skb_ptr = completed;
 
-                if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+                if (__netif_tx_trylock(fifo->txq)) {
                         vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
                                         NR_SKB_COMPLETED, &more);
-                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
+                        __netif_tx_unlock(fifo->txq);
                 }
+
                 /* free SKBs */
                 for (temp = completed; temp != skb_ptr; temp++)
                         dev_kfree_skb_irq(*temp);
-        } while (more) ;
+        } while (more);
 }
 
 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
@@ -131,53 +131,6 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
 }
 
 /*
- * MultiQ manipulation helper functions
- */
-static inline int vxge_netif_queue_stopped(struct vxge_fifo *fifo)
-{
-        struct net_device *dev = fifo->ndev;
-        struct netdev_queue *txq = NULL;
-        int vpath_no = fifo->driver_id;
-        int ret = 0;
-
-        if (fifo->tx_steering_type)
-                txq = netdev_get_tx_queue(dev, vpath_no);
-        else
-                txq = netdev_get_tx_queue(dev, 0);
-
-        ret = netif_tx_queue_stopped(txq);
-        return ret;
-}
-
-void vxge_stop_tx_queue(struct vxge_fifo *fifo)
-{
-        struct net_device *dev = fifo->ndev;
-        struct netdev_queue *txq = NULL;
-
-        if (fifo->tx_steering_type)
-                txq = netdev_get_tx_queue(dev, fifo->driver_id);
-        else
-                txq = netdev_get_tx_queue(dev, 0);
-
-        netif_tx_stop_queue(txq);
-}
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo)
-{
-        struct net_device *dev = fifo->ndev;
-        struct netdev_queue *txq = NULL;
-        int vpath_no = fifo->driver_id;
-
-        if (fifo->tx_steering_type)
-                txq = netdev_get_tx_queue(dev, vpath_no);
-        else
-                txq = netdev_get_tx_queue(dev, 0);
-
-        if (netif_tx_queue_stopped(txq))
-                netif_tx_wake_queue(txq);
-}
-
-/*
  * vxge_callback_link_up
  *
  * This function is called during interrupt context to notify link up state
@@ -650,7 +603,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                         &dtr, &t_code) == VXGE_HW_OK);
 
         *skb_ptr = done_skb;
-        vxge_wake_tx_queue(fifo);
+        if (netif_tx_queue_stopped(fifo->txq))
+                netif_tx_wake_queue(fifo->txq);
 
         vxge_debug_entryexit(VXGE_TRACE,
                 "%s: %s:%d Exiting...",
@@ -659,8 +613,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 }
 
 /* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
-                        int *do_lock)
+static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
 {
         u16 queue_len, counter = 0;
         if (skb->protocol == htons(ETH_P_IP)) {
@@ -679,12 +632,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
                                 vdev->vpath_selector[queue_len - 1];
                         if (counter >= queue_len)
                                 counter = queue_len - 1;
-
-                        if (ip->protocol == IPPROTO_UDP) {
-#ifdef NETIF_F_LLTX
-                                *do_lock = 0;
-#endif
-                        }
                 }
         }
         return counter;
@@ -781,8 +728,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
  *
  * This function is the Tx entry point of the driver. Neterion NIC supports
  * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
- * NOTE: when device cant queue the pkt, just the trans_start variable will
- * not be upadted.
 */
 static netdev_tx_t
 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -799,9 +744,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
         struct vxge_tx_priv *txdl_priv = NULL;
         struct __vxge_hw_fifo *fifo_hw;
         int offload_type;
-        unsigned long flags = 0;
         int vpath_no = 0;
-        int do_spin_tx_lock = 1;
 
         vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                 dev->name, __func__, __LINE__);
@@ -837,7 +780,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
         if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
                 vpath_no = skb_get_queue_mapping(skb);
         else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
-                vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);
+                vpath_no = vxge_get_vpath_no(vdev, skb);
 
         vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
 
@@ -847,40 +790,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
         fifo = &vdev->vpaths[vpath_no].fifo;
         fifo_hw = fifo->handle;
 
-        if (do_spin_tx_lock)
-                spin_lock_irqsave(&fifo->tx_lock, flags);
-        else {
-                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-                        return NETDEV_TX_LOCKED;
-        }
-
-        if (vxge_netif_queue_stopped(fifo)) {
-                spin_unlock_irqrestore(&fifo->tx_lock, flags);
+        if (netif_tx_queue_stopped(fifo->txq))
                 return NETDEV_TX_BUSY;
-        }
 
         avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
         if (avail == 0) {
                 vxge_debug_tx(VXGE_ERR,
                         "%s: No free TXDs available", dev->name);
                 fifo->stats.txd_not_free++;
-                vxge_stop_tx_queue(fifo);
-                goto _exit2;
+                goto _exit0;
         }
 
         /* Last TXD? Stop tx queue to avoid dropping packets. TX
          * completion will resume the queue.
          */
         if (avail == 1)
-                vxge_stop_tx_queue(fifo);
+                netif_tx_stop_queue(fifo->txq);
 
         status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
         if (unlikely(status != VXGE_HW_OK)) {
                 vxge_debug_tx(VXGE_ERR,
                         "%s: Out of descriptors .", dev->name);
                 fifo->stats.txd_out_of_desc++;
-                vxge_stop_tx_queue(fifo);
-                goto _exit2;
+                goto _exit0;
         }
 
         vxge_debug_tx(VXGE_TRACE,
@@ -900,9 +832,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
                 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-                vxge_stop_tx_queue(fifo);
                 fifo->stats.pci_map_fail++;
-                goto _exit2;
+                goto _exit0;
         }
 
         txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
@@ -925,13 +856,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (!frag->size)
                         continue;
 
-                dma_pointer =
-                        (u64)pci_map_page(fifo->pdev, frag->page,
+                dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
                                 frag->page_offset, frag->size,
                                 PCI_DMA_TODEVICE);
 
                 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
-                        goto _exit0;
+                        goto _exit2;
                 vxge_debug_tx(VXGE_TRACE,
                         "%s: %s:%d frag = %d dma_pointer = 0x%llx",
                         dev->name, __func__, __LINE__, i,
@@ -946,11 +876,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
         offload_type = vxge_offload_type(skb);
 
         if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-
                 int mss = vxge_tcp_mss(skb);
                 if (mss) {
-                        vxge_debug_tx(VXGE_TRACE,
-                                "%s: %s:%d mss = %d",
+                        vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
                                 dev->name, __func__, __LINE__, mss);
                         vxge_hw_fifo_txdl_mss_set(dtr, mss);
                 } else {
@@ -968,19 +896,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                         VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
 
         vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-#ifdef NETIF_F_LLTX
-        dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
-#endif
-        spin_unlock_irqrestore(&fifo->tx_lock, flags);
 
-        VXGE_COMPLETE_VPATH_TX(fifo);
         vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
                 dev->name, __func__, __LINE__);
         return NETDEV_TX_OK;
 
-_exit0:
+_exit2:
         vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-
 _exit1:
         j = 0;
         frag = &skb_shinfo(skb)->frags[0];
@@ -995,10 +917,9 @@ _exit1:
         }
 
         vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit2:
+_exit0:
+        netif_tx_stop_queue(fifo->txq);
         dev_kfree_skb(skb);
-        spin_unlock_irqrestore(&fifo->tx_lock, flags);
-        VXGE_COMPLETE_VPATH_TX(fifo);
 
         return NETDEV_TX_OK;
 }
@@ -1448,7 +1369,8 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
         clear_bit(vp_id, &vdev->vp_reset);
 
         /* Start the vpath queue */
-        vxge_wake_tx_queue(&vpath->fifo);
+        if (netif_tx_queue_stopped(vpath->fifo.txq))
+                netif_tx_wake_queue(vpath->fifo.txq);
 
         return ret;
 }
@@ -2078,6 +2000,12 @@ int vxge_open_vpaths(struct vxgedev *vdev)
                         vdev->config.tx_steering_type;
                 vpath->fifo.ndev = vdev->ndev;
                 vpath->fifo.pdev = vdev->pdev;
+                if (vdev->config.tx_steering_type)
+                        vpath->fifo.txq =
+                                netdev_get_tx_queue(vdev->ndev, i);
+                else
+                        vpath->fifo.txq =
+                                netdev_get_tx_queue(vdev->ndev, 0);
                 vpath->fifo.indicate_max_pkts =
                         vdev->config.fifo_indicate_max_pkts;
                 vpath->ring.rx_vector_no = 0;
@@ -2564,7 +2492,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
                         vxge_vpath_intr_disable(vdev, i);
 
                         /* stop the queue for this vpath */
-                        vxge_stop_tx_queue(&vpath->fifo);
+                        netif_tx_stop_queue(vpath->fifo.txq);
                         continue;
                 }
         }
@@ -2627,7 +2555,6 @@ vxge_open(struct net_device *dev)
                 goto out1;
         }
 
-
         if (vdev->config.intr_type != MSI_X) {
                 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
                         vdev->config.napi_weight);
@@ -3200,7 +3127,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
         struct net_device *ndev;
         enum vxge_hw_status status = VXGE_HW_OK;
         struct vxgedev *vdev;
-        int i, ret = 0, no_of_queue = 1;
+        int ret = 0, no_of_queue = 1;
         u64 stat;
 
         *vdev_out = NULL;
@@ -3273,13 +3200,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
         if (vdev->config.gro_enable)
                 ndev->features |= NETIF_F_GRO;
 
-#ifdef NETIF_F_LLTX
-        ndev->features |= NETIF_F_LLTX;
-#endif
-
-        for (i = 0; i < no_of_vpath; i++)
-                spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
-
         if (register_netdev(ndev)) {
                 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                         "%s: %s : device registration failed!",
@@ -3379,6 +3299,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
 {
         struct net_device *dev = hldev->ndev;
         struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+        struct vxge_vpath *vpath = NULL;
         int vpath_idx;
 
         vxge_debug_entryexit(vdev->level_trace,
@@ -3389,9 +3310,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
          */
         vdev->cric_err_event = type;
 
-        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
-                if (vdev->vpaths[vpath_idx].device_id == vp_id)
+        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+                vpath = &vdev->vpaths[vpath_idx];
+                if (vpath->device_id == vp_id)
                         break;
+        }
 
         if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
                 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
@@ -3428,8 +3351,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
                                 vxge_vpath_intr_disable(vdev, vpath_idx);
 
                                 /* stop the queue for this vpath */
-                                vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
-                                                fifo);
+                                netif_tx_stop_queue(vpath->fifo.txq);
                         }
                 }
         }
@@ -4274,7 +4196,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
                 vdev->vpaths[j].is_configured = 1;
                 vdev->vpaths[j].device_id = i;
-                vdev->vpaths[j].fifo.driver_id = j;
                 vdev->vpaths[j].ring.driver_id = j;
                 vdev->vpaths[j].vdev = vdev;
                 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index a3845822d46..5982396787f 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -217,17 +217,13 @@ struct vxge_fifo_stats {
 };
 
 struct vxge_fifo {
         struct net_device *ndev;
         struct pci_dev *pdev;
         struct __vxge_hw_fifo *handle;
+        struct netdev_queue *txq;
 
-        /* The vpath id maintained in the driver -
-         * 0 to 'maximum_vpaths_in_function - 1'
-         */
-        int driver_id;
         int tx_steering_type;
         int indicate_max_pkts;
-        spinlock_t tx_lock;
 
         /* Tx stats */
         struct vxge_fifo_stats stats;
@@ -275,7 +271,6 @@ struct vxge_ring {
 } ____cacheline_aligned;
 
 struct vxge_vpath {
-
         struct vxge_fifo fifo;
         struct vxge_ring ring;
 
@@ -443,10 +438,6 @@ int vxge_open_vpaths(struct vxgedev *vdev);
 
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 
-void vxge_stop_tx_queue(struct vxge_fifo *fifo);
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo);
-
 enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
         struct macInfo *mac);
 
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 6cc1dd79b40..1a7078304ad 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -2466,14 +2466,12 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
  * the same.
  * @fifo: Handle to the fifo object used for non offload send
  *
  * The function polls the Tx for the completed descriptors and calls
  * the driver via supplied completion callback.
  *
  * Returns: VXGE_HW_OK, if the polling is completed successful.
  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
  * descriptors available which are yet to be processed.
- *
- * See also: vxge_hw_vpath_poll_tx().
  */
 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
         struct sk_buff ***skb_ptr, int nr_skb,