author		Jon Mason <jon.mason@exar.com>	2010-07-15 04:47:23 -0400
committer	David S. Miller <davem@davemloft.net>	2010-07-15 23:46:21 -0400
commit		d03848e057cb33ab4261264903b5ebee0738a8dc (patch)
tree		5fc9f3516c3e037a0000ae9c8253d5442c48573e /drivers/net
parent		bb7a0bd600ac2e09a8747ef89e692a2967ed8c97 (diff)
vxge: Remove queue_state references
Remove queue_state references, as they are no longer necessary.
Also, the driver needs to start/stop the queue regardless of which type
of steering is enabled, so remove the checks for TX_MULTIQ_STEERING only
and start/stop the queues for all steering types.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
Signed-off-by: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
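
For context, the change converges on the stock netdev multiqueue API:
netif_tx_stop_all_queues()/netif_tx_wake_all_queues() on link and reset
events, and per-queue stop/wake through the fifo's subqueue index, with no
driver-local queue_state shadow flag. A minimal sketch of that pattern
(hypothetical my_fifo/my_link_down names, not the vxge sources themselves):

	/* Sketch: stop/wake TX queues the generic way; correct for both
	 * the single-queue case (index 0) and multiqueue steering. */
	#include <linux/netdevice.h>

	struct my_fifo {
		struct net_device *ndev;
		int tx_steering_type;
		int driver_id;	/* doubles as the TX subqueue index */
	};

	static void my_link_down(struct net_device *ndev)
	{
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);	/* safe for 1..N queues */
	}

	static void my_fifo_wake(struct my_fifo *fifo)
	{
		/* When steering is off, everything runs on subqueue 0. */
		int qidx = fifo->tx_steering_type ? fifo->driver_id : 0;
		struct netdev_queue *txq = netdev_get_tx_queue(fifo->ndev, qidx);

		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}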
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/vxge/vxge-main.c	118
-rw-r--r--	drivers/net/vxge/vxge-main.h	10
2 files changed, 42 insertions, 86 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ed1786598c9e..e78703d9e381 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -133,75 +133,48 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
 /*
  * MultiQ manipulation helper functions
  */
-void vxge_stop_all_tx_queue(struct vxgedev *vdev)
+static inline int vxge_netif_queue_stopped(struct vxge_fifo *fifo)
 {
-	int i;
-	struct net_device *dev = vdev->ndev;
+	struct net_device *dev = fifo->ndev;
+	struct netdev_queue *txq = NULL;
+	int vpath_no = fifo->driver_id;
+	int ret = 0;
 
-	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-		for (i = 0; i < vdev->no_of_vpath; i++)
-			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
-	}
-	netif_tx_stop_all_queues(dev);
+	if (fifo->tx_steering_type)
+		txq = netdev_get_tx_queue(dev, vpath_no);
+	else
+		txq = netdev_get_tx_queue(dev, 0);
+
+	ret = netif_tx_queue_stopped(txq);
+	return ret;
 }
 
 void vxge_stop_tx_queue(struct vxge_fifo *fifo)
 {
 	struct net_device *dev = fifo->ndev;
-
 	struct netdev_queue *txq = NULL;
-	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
+
+	if (fifo->tx_steering_type)
 		txq = netdev_get_tx_queue(dev, fifo->driver_id);
-	else {
+	else
 		txq = netdev_get_tx_queue(dev, 0);
-		fifo->queue_state = VPATH_QUEUE_STOP;
-	}
 
 	netif_tx_stop_queue(txq);
 }
 
-void vxge_start_all_tx_queue(struct vxgedev *vdev)
-{
-	int i;
-	struct net_device *dev = vdev->ndev;
-
-	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-		for (i = 0; i < vdev->no_of_vpath; i++)
-			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
-	}
-	netif_tx_start_all_queues(dev);
-}
-
-static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
-{
-	int i;
-	struct net_device *dev = vdev->ndev;
-
-	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-		for (i = 0; i < vdev->no_of_vpath; i++)
-			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
-	}
-	netif_tx_wake_all_queues(dev);
-}
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
+void vxge_wake_tx_queue(struct vxge_fifo *fifo)
 {
 	struct net_device *dev = fifo->ndev;
-
-	int vpath_no = fifo->driver_id;
 	struct netdev_queue *txq = NULL;
-	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
+	int vpath_no = fifo->driver_id;
+
+	if (fifo->tx_steering_type)
 		txq = netdev_get_tx_queue(dev, vpath_no);
-		if (netif_tx_queue_stopped(txq))
-			netif_tx_wake_queue(txq);
-	} else {
+	else
 		txq = netdev_get_tx_queue(dev, 0);
-		if (fifo->queue_state == VPATH_QUEUE_STOP)
-			if (netif_tx_queue_stopped(txq)) {
-				fifo->queue_state = VPATH_QUEUE_START;
-				netif_tx_wake_queue(txq);
-			}
-	}
+
+	if (netif_tx_queue_stopped(txq))
+		netif_tx_wake_queue(txq);
 }
 
 /*
@@ -222,7 +195,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
 	vdev->stats.link_up++;
 
 	netif_carrier_on(vdev->ndev);
-	vxge_wake_all_tx_queue(vdev);
+	netif_tx_wake_all_queues(vdev->ndev);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -246,7 +219,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
 
 	vdev->stats.link_down++;
 	netif_carrier_off(vdev->ndev);
-	vxge_stop_all_tx_queue(vdev);
+	netif_tx_stop_all_queues(vdev->ndev);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -677,7 +650,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		&dtr, &t_code) == VXGE_HW_OK);
 
 	*skb_ptr = done_skb;
-	vxge_wake_tx_queue(fifo, skb);
+	vxge_wake_tx_queue(fifo);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d Exiting...",
@@ -881,17 +854,11 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_LOCKED;
 	}
 
-	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
-		if (netif_subqueue_stopped(dev, skb)) {
-			spin_unlock_irqrestore(&fifo->tx_lock, flags);
-			return NETDEV_TX_BUSY;
-		}
-	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
-		if (netif_queue_stopped(dev)) {
-			spin_unlock_irqrestore(&fifo->tx_lock, flags);
-			return NETDEV_TX_BUSY;
-		}
+	if (vxge_netif_queue_stopped(fifo)) {
+		spin_unlock_irqrestore(&fifo->tx_lock, flags);
+		return NETDEV_TX_BUSY;
 	}
+
 	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
 	if (avail == 0) {
 		vxge_debug_tx(VXGE_ERR,
@@ -1478,7 +1445,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	clear_bit(vp_id, &vdev->vp_reset);
 
 	/* Start the vpath queue */
-	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);
+	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo);
 
 	return ret;
 }
@@ -1513,7 +1480,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 				"%s: execution mode is debug, returning..",
 				vdev->ndev->name);
 			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-			vxge_stop_all_tx_queue(vdev);
+			netif_tx_stop_all_queues(vdev->ndev);
 			return 0;
 		}
 	}
@@ -1523,7 +1490,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 
 	switch (vdev->cric_err_event) {
 	case VXGE_HW_EVENT_UNKNOWN:
-		vxge_stop_all_tx_queue(vdev);
+		netif_tx_stop_all_queues(vdev->ndev);
 		vxge_debug_init(VXGE_ERR,
 			"fatal: %s: Disabling device due to"
 			"unknown error",
@@ -1544,7 +1511,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 	case VXGE_HW_EVENT_VPATH_ERR:
 		break;
 	case VXGE_HW_EVENT_CRITICAL_ERR:
-		vxge_stop_all_tx_queue(vdev);
+		netif_tx_stop_all_queues(vdev->ndev);
 		vxge_debug_init(VXGE_ERR,
 			"fatal: %s: Disabling device due to"
 			"serious error",
@@ -1554,7 +1521,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 		ret = -EPERM;
 		goto out;
 	case VXGE_HW_EVENT_SERR:
-		vxge_stop_all_tx_queue(vdev);
+		netif_tx_stop_all_queues(vdev->ndev);
 		vxge_debug_init(VXGE_ERR,
 			"fatal: %s: Disabling device due to"
 			"serious error",
@@ -1566,7 +1533,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 		ret = -EPERM;
 		goto out;
 	case VXGE_HW_EVENT_SLOT_FREEZE:
-		vxge_stop_all_tx_queue(vdev);
+		netif_tx_stop_all_queues(vdev->ndev);
 		vxge_debug_init(VXGE_ERR,
 			"fatal: %s: Disabling device due to"
 			"slot freeze",
@@ -1580,7 +1547,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 	}
 
 	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
-		vxge_stop_all_tx_queue(vdev);
+		netif_tx_stop_all_queues(vdev->ndev);
 
 	if (event == VXGE_LL_FULL_RESET) {
 		status = vxge_reset_all_vpaths(vdev);
@@ -1640,7 +1607,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
 		}
 
-		vxge_wake_all_tx_queue(vdev);
+		netif_tx_wake_all_queues(vdev->ndev);
 	}
 
 out:
@@ -2779,7 +2746,7 @@ vxge_open(struct net_device *dev)
 		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
 	}
 
-	vxge_start_all_tx_queue(vdev);
+	netif_tx_start_all_queues(vdev->ndev);
 	goto out0;
 
 out2:
@@ -2902,7 +2869,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
 
 	netif_carrier_off(vdev->ndev);
 	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
-	vxge_stop_all_tx_queue(vdev);
+	netif_tx_stop_all_queues(vdev->ndev);
 
 	/* Note that at this point xmit() is stopped by upper layer */
 	if (do_io)
@@ -3215,7 +3182,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 	u64 stat;
 
 	*vdev_out = NULL;
-	if (config->tx_steering_type == TX_MULTIQ_STEERING)
+	if (config->tx_steering_type)
 		no_of_queue = no_of_vpath;
 
 	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
@@ -3284,9 +3251,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 	if (vdev->config.gro_enable)
 		ndev->features |= NETIF_F_GRO;
 
-	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
-		ndev->real_num_tx_queues = no_of_vpath;
-
 #ifdef NETIF_F_LLTX
 	ndev->features |= NETIF_F_LLTX;
 #endif
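
Dropping the manual real_num_tx_queues assignment in the hunk above is safe
because, in kernels of this vintage, alloc_etherdev_mq() already sizes both
num_tx_queues and real_num_tx_queues from its queue-count argument (this
reading of the allocator's behavior is an assumption worth verifying against
the tree in question). A sketch of the idiom, with my_priv as a hypothetical
private struct:

	/* Assumed behavior: the allocator sets real_num_tx_queues itself,
	 * so no manual fixup is needed after this call. */
	ndev = alloc_etherdev_mq(sizeof(struct my_priv), no_of_queue);
	if (!ndev)
		return -ENOMEM;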
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 60276b20fa5e..a3845822d46e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -228,10 +228,6 @@ struct vxge_fifo {
 	int tx_steering_type;
 	int indicate_max_pkts;
 	spinlock_t tx_lock;
-	/* flag used to maintain queue state when MULTIQ is not enabled */
-#define VPATH_QUEUE_START	0
-#define VPATH_QUEUE_STOP	1
-	int queue_state;
 
 	/* Tx stats */
 	struct vxge_fifo_stats stats;
@@ -447,13 +443,9 @@ int vxge_open_vpaths(struct vxgedev *vdev);
 
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 
-void vxge_stop_all_tx_queue(struct vxgedev *vdev);
-
 void vxge_stop_tx_queue(struct vxge_fifo *fifo);
 
-void vxge_start_all_tx_queue(struct vxgedev *vdev);
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb);
+void vxge_wake_tx_queue(struct vxge_fifo *fifo);
 
 enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
 		struct macInfo *mac);