author     Sandeep Gopalpet <Sandeep.Kumar@freescale.com>    2009-11-02 02:03:15 -0500
committer  David S. Miller <davem@davemloft.net>             2009-11-03 02:40:57 -0500
commit     fba4ed030cfae7efdb6b79a57b0c5a9d72c9de83 (patch)
tree       ddee54010c64517a01ea112ca16e5bc1fee0938c /drivers/net/gianfar.c
parent     f4983704a63b3764418905a77d48105a8cbce97f (diff)
gianfar: Add Multiple Queue Support
This patch introduces multiple Tx and Rx queues.
The incoming packets can be classified into different queues
based on filer rules (out of scope of this patch). The number
of queues enabled will be based on the DTS entries fsl,num_tx_queues
and fsl,num_rx_queues.
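
A minimal sketch of that lookup, which the patch performs inline in
gfar_of_init() (the helper name here is illustrative, not part of the
patch): both properties default to a single queue when absent and are
bounds-checked against MAX_TX_QS/MAX_RX_QS before the multi-queue
netdev is allocated.

    static int gfar_parse_queue_counts(struct device_node *np,
                                       unsigned int *num_tx_qs,
                                       unsigned int *num_rx_qs)
    {
            const u32 *tx_queues = of_get_property(np, "fsl,num_tx_queues", NULL);
            const u32 *rx_queues = of_get_property(np, "fsl,num_rx_queues", NULL);

            /* default to one queue when the DTS omits the property */
            *num_tx_qs = tx_queues ? *tx_queues : 1;
            *num_rx_qs = rx_queues ? *rx_queues : 1;

            /* reject counts larger than the driver's per-device arrays */
            if (*num_tx_qs > MAX_TX_QS || *num_rx_qs > MAX_RX_QS)
                    return -EINVAL;

            return 0;
    }

On success the netdev is allocated with alloc_etherdev_mq(sizeof(*priv),
num_tx_qs), so the stack sees one TX queue per hardware TX queue.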
Although we are enabling multiple queues, interrupt coalescing
remains at the per-device level (eTSEC 1.7 doesn't support multiple
rxics and txics).
Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 695 |
1 file changed, 468 insertions(+), 227 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index fa0188ea9233..aa258e899261 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,6 +143,7 @@ void gfar_start(struct net_device *dev); | |||
143 | static void gfar_clear_exact_match(struct net_device *dev); | 143 | static void gfar_clear_exact_match(struct net_device *dev); |
144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | 144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); |
145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
146 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb); | ||
146 | 147 | ||
147 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | 148 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
148 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | 149 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); |
@@ -171,71 +172,89 @@ static int gfar_init_bds(struct net_device *ndev) | |||
171 | struct gfar_priv_rx_q *rx_queue = NULL; | 172 | struct gfar_priv_rx_q *rx_queue = NULL; |
172 | struct txbd8 *txbdp; | 173 | struct txbd8 *txbdp; |
173 | struct rxbd8 *rxbdp; | 174 | struct rxbd8 *rxbdp; |
174 | int i; | 175 | int i, j; |
175 | |||
176 | tx_queue = priv->tx_queue; | ||
177 | rx_queue = priv->rx_queue; | ||
178 | 176 | ||
179 | /* Initialize some variables in our dev structure */ | 177 | for (i = 0; i < priv->num_tx_queues; i++) { |
180 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | 178 | tx_queue = priv->tx_queue[i]; |
181 | tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base; | 179 | /* Initialize some variables in our dev structure */ |
182 | rx_queue->cur_rx = rx_queue->rx_bd_base; | 180 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; |
183 | tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0; | 181 | tx_queue->dirty_tx = tx_queue->tx_bd_base; |
184 | rx_queue->skb_currx = 0; | 182 | tx_queue->cur_tx = tx_queue->tx_bd_base; |
183 | tx_queue->skb_curtx = 0; | ||
184 | tx_queue->skb_dirtytx = 0; | ||
185 | |||
186 | /* Initialize Transmit Descriptor Ring */ | ||
187 | txbdp = tx_queue->tx_bd_base; | ||
188 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | ||
189 | txbdp->lstatus = 0; | ||
190 | txbdp->bufPtr = 0; | ||
191 | txbdp++; | ||
192 | } | ||
185 | 193 | ||
186 | /* Initialize Transmit Descriptor Ring */ | 194 | /* Set the last descriptor in the ring to indicate wrap */ |
187 | txbdp = tx_queue->tx_bd_base; | 195 | txbdp--; |
188 | for (i = 0; i < tx_queue->tx_ring_size; i++) { | 196 | txbdp->status |= TXBD_WRAP; |
189 | txbdp->lstatus = 0; | ||
190 | txbdp->bufPtr = 0; | ||
191 | txbdp++; | ||
192 | } | 197 | } |
193 | 198 | ||
194 | /* Set the last descriptor in the ring to indicate wrap */ | 199 | for (i = 0; i < priv->num_rx_queues; i++) { |
195 | txbdp--; | 200 | rx_queue = priv->rx_queue[i]; |
196 | txbdp->status |= TXBD_WRAP; | 201 | rx_queue->cur_rx = rx_queue->rx_bd_base; |
202 | rx_queue->skb_currx = 0; | ||
203 | rxbdp = rx_queue->rx_bd_base; | ||
197 | 204 | ||
198 | rxbdp = rx_queue->rx_bd_base; | 205 | for (j = 0; j < rx_queue->rx_ring_size; j++) { |
199 | for (i = 0; i < rx_queue->rx_ring_size; i++) { | 206 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; |
200 | struct sk_buff *skb = rx_queue->rx_skbuff[i]; | ||
201 | 207 | ||
202 | if (skb) { | 208 | if (skb) { |
203 | gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr); | 209 | gfar_init_rxbdp(rx_queue, rxbdp, |
204 | } else { | 210 | rxbdp->bufPtr); |
205 | skb = gfar_new_skb(ndev); | 211 | } else { |
206 | if (!skb) { | 212 | skb = gfar_new_skb(ndev); |
207 | pr_err("%s: Can't allocate RX buffers\n", | 213 | if (!skb) { |
208 | ndev->name); | 214 | pr_err("%s: Can't allocate RX buffers\n", |
209 | return -ENOMEM; | 215 | ndev->name); |
216 | goto err_rxalloc_fail; | ||
217 | } | ||
218 | rx_queue->rx_skbuff[j] = skb; | ||
219 | |||
220 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | ||
210 | } | 221 | } |
211 | rx_queue->rx_skbuff[i] = skb; | ||
212 | 222 | ||
213 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | 223 | rxbdp++; |
214 | } | 224 | } |
215 | 225 | ||
216 | rxbdp++; | ||
217 | } | 226 | } |
218 | 227 | ||
219 | return 0; | 228 | return 0; |
229 | |||
230 | err_rxalloc_fail: | ||
231 | free_skb_resources(priv); | ||
232 | return -ENOMEM; | ||
220 | } | 233 | } |
221 | 234 | ||
222 | static int gfar_alloc_skb_resources(struct net_device *ndev) | 235 | static int gfar_alloc_skb_resources(struct net_device *ndev) |
223 | { | 236 | { |
224 | void *vaddr; | 237 | void *vaddr; |
225 | int i; | 238 | dma_addr_t addr; |
239 | int i, j, k; | ||
226 | struct gfar_private *priv = netdev_priv(ndev); | 240 | struct gfar_private *priv = netdev_priv(ndev); |
227 | struct device *dev = &priv->ofdev->dev; | 241 | struct device *dev = &priv->ofdev->dev; |
228 | struct gfar_priv_tx_q *tx_queue = NULL; | 242 | struct gfar_priv_tx_q *tx_queue = NULL; |
229 | struct gfar_priv_rx_q *rx_queue = NULL; | 243 | struct gfar_priv_rx_q *rx_queue = NULL; |
230 | 244 | ||
231 | tx_queue = priv->tx_queue; | 245 | priv->total_tx_ring_size = 0; |
232 | rx_queue = priv->rx_queue; | 246 | for (i = 0; i < priv->num_tx_queues; i++) |
247 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | ||
248 | |||
249 | priv->total_rx_ring_size = 0; | ||
250 | for (i = 0; i < priv->num_rx_queues; i++) | ||
251 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | ||
233 | 252 | ||
234 | /* Allocate memory for the buffer descriptors */ | 253 | /* Allocate memory for the buffer descriptors */ |
235 | vaddr = dma_alloc_coherent(dev, | 254 | vaddr = dma_alloc_coherent(dev, |
236 | sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size + | 255 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
237 | sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size, | 256 | sizeof(struct rxbd8) * priv->total_rx_ring_size, |
238 | &tx_queue->tx_bd_dma_base, GFP_KERNEL); | 257 | &addr, GFP_KERNEL); |
239 | if (!vaddr) { | 258 | if (!vaddr) { |
240 | if (netif_msg_ifup(priv)) | 259 | if (netif_msg_ifup(priv)) |
241 | pr_err("%s: Could not allocate buffer descriptors!\n", | 260 | pr_err("%s: Could not allocate buffer descriptors!\n", |
@@ -243,38 +262,57 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) | |||
243 | return -ENOMEM; | 262 | return -ENOMEM; |
244 | } | 263 | } |
245 | 264 | ||
246 | tx_queue->tx_bd_base = vaddr; | 265 | for (i = 0; i < priv->num_tx_queues; i++) { |
247 | tx_queue->dev = ndev; | 266 | tx_queue = priv->tx_queue[i]; |
267 | tx_queue->tx_bd_base = (struct txbd8 *) vaddr; | ||
268 | tx_queue->tx_bd_dma_base = addr; | ||
269 | tx_queue->dev = ndev; | ||
270 | /* enet DMA only understands physical addresses */ | ||
271 | addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
272 | vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
273 | } | ||
248 | 274 | ||
249 | /* Start the rx descriptor ring where the tx ring leaves off */ | 275 | /* Start the rx descriptor ring where the tx ring leaves off */ |
250 | vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size; | 276 | for (i = 0; i < priv->num_rx_queues; i++) { |
251 | rx_queue->rx_bd_base = vaddr; | 277 | rx_queue = priv->rx_queue[i]; |
252 | rx_queue->dev = ndev; | 278 | rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; |
279 | rx_queue->rx_bd_dma_base = addr; | ||
280 | rx_queue->dev = ndev; | ||
281 | addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
282 | vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
283 | } | ||
253 | 284 | ||
254 | /* Setup the skbuff rings */ | 285 | /* Setup the skbuff rings */ |
255 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | 286 | for (i = 0; i < priv->num_tx_queues; i++) { |
287 | tx_queue = priv->tx_queue[i]; | ||
288 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | ||
256 | tx_queue->tx_ring_size, GFP_KERNEL); | 289 | tx_queue->tx_ring_size, GFP_KERNEL); |
257 | if (!tx_queue->tx_skbuff) { | 290 | if (!tx_queue->tx_skbuff) { |
258 | if (netif_msg_ifup(priv)) | 291 | if (netif_msg_ifup(priv)) |
259 | pr_err("%s: Could not allocate tx_skbuff\n", | 292 | pr_err("%s: Could not allocate tx_skbuff\n", |
260 | ndev->name); | 293 | ndev->name); |
261 | goto cleanup; | 294 | goto cleanup; |
262 | } | 295 | } |
263 | 296 | ||
264 | for (i = 0; i < tx_queue->tx_ring_size; i++) | 297 | for (k = 0; k < tx_queue->tx_ring_size; k++) |
265 | tx_queue->tx_skbuff[i] = NULL; | 298 | tx_queue->tx_skbuff[k] = NULL; |
299 | } | ||
266 | 300 | ||
267 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | 301 | for (i = 0; i < priv->num_rx_queues; i++) { |
302 | rx_queue = priv->rx_queue[i]; | ||
303 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | ||
268 | rx_queue->rx_ring_size, GFP_KERNEL); | 304 | rx_queue->rx_ring_size, GFP_KERNEL); |
269 | if (!rx_queue->rx_skbuff) { | ||
270 | if (netif_msg_ifup(priv)) | ||
271 | pr_err("%s: Could not allocate rx_skbuff\n", | ||
272 | ndev->name); | ||
273 | goto cleanup; | ||
274 | } | ||
275 | 305 | ||
276 | for (i = 0; i < rx_queue->rx_ring_size; i++) | 306 | if (!rx_queue->rx_skbuff) { |
277 | rx_queue->rx_skbuff[i] = NULL; | 307 | if (netif_msg_ifup(priv)) |
308 | pr_err("%s: Could not allocate rx_skbuff\n", | ||
309 | ndev->name); | ||
310 | goto cleanup; | ||
311 | } | ||
312 | |||
313 | for (j = 0; j < rx_queue->rx_ring_size; j++) | ||
314 | rx_queue->rx_skbuff[j] = NULL; | ||
315 | } | ||
278 | 316 | ||
279 | if (gfar_init_bds(ndev)) | 317 | if (gfar_init_bds(ndev)) |
280 | goto cleanup; | 318 | goto cleanup; |
@@ -286,33 +324,47 @@ cleanup: | |||
286 | return -ENOMEM; | 324 | return -ENOMEM; |
287 | } | 325 | } |
288 | 326 | ||
327 | static void gfar_init_tx_rx_base(struct gfar_private *priv) | ||
328 | { | ||
329 | struct gfar __iomem *regs = priv->gfargrp.regs; | ||
330 | u32 *baddr; | ||
331 | int i; | ||
332 | |||
333 | baddr = &regs->tbase0; | ||
334 | for(i = 0; i < priv->num_tx_queues; i++) { | ||
335 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | ||
336 | baddr += 2; | ||
337 | } | ||
338 | |||
339 | baddr = &regs->rbase0; | ||
340 | for(i = 0; i < priv->num_rx_queues; i++) { | ||
341 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | ||
342 | baddr += 2; | ||
343 | } | ||
344 | } | ||
345 | |||
289 | static void gfar_init_mac(struct net_device *ndev) | 346 | static void gfar_init_mac(struct net_device *ndev) |
290 | { | 347 | { |
291 | struct gfar_private *priv = netdev_priv(ndev); | 348 | struct gfar_private *priv = netdev_priv(ndev); |
292 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
293 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
294 | struct gfar __iomem *regs = priv->gfargrp.regs; | 349 | struct gfar __iomem *regs = priv->gfargrp.regs; |
295 | u32 rctrl = 0; | 350 | u32 rctrl = 0; |
296 | u32 tctrl = 0; | 351 | u32 tctrl = 0; |
297 | u32 attrs = 0; | 352 | u32 attrs = 0; |
298 | 353 | ||
299 | tx_queue = priv->tx_queue; | 354 | /* write the tx/rx base registers */ |
300 | rx_queue = priv->rx_queue; | 355 | gfar_init_tx_rx_base(priv); |
301 | |||
302 | /* enet DMA only understands physical addresses */ | ||
303 | gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base); | ||
304 | gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base + | ||
305 | sizeof(*tx_queue->tx_bd_base) * | ||
306 | tx_queue->tx_ring_size); | ||
307 | 356 | ||
308 | /* Configure the coalescing support */ | 357 | /* Configure the coalescing support */ |
309 | gfar_write(&regs->txic, 0); | 358 | gfar_write(&regs->txic, 0); |
310 | if (tx_queue->txcoalescing) | 359 | if (priv->tx_queue[0]->txcoalescing) |
311 | gfar_write(&regs->txic, tx_queue->txic); | 360 | gfar_write(&regs->txic, priv->tx_queue[0]->txic); |
312 | 361 | ||
313 | gfar_write(&regs->rxic, 0); | 362 | gfar_write(&regs->rxic, 0); |
314 | if (rx_queue->rxcoalescing) | 363 | if (priv->rx_queue[0]->rxcoalescing) |
315 | gfar_write(&regs->rxic, rx_queue->rxic); | 364 | gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); |
365 | |||
366 | if (priv->rx_filer_enable) | ||
367 | rctrl |= RCTRL_FILREN; | ||
316 | 368 | ||
317 | if (priv->rx_csum_enable) | 369 | if (priv->rx_csum_enable) |
318 | rctrl |= RCTRL_CHECKSUMMING; | 370 | rctrl |= RCTRL_CHECKSUMMING; |
@@ -341,6 +393,8 @@ static void gfar_init_mac(struct net_device *ndev) | |||
341 | if (ndev->features & NETIF_F_IP_CSUM) | 393 | if (ndev->features & NETIF_F_IP_CSUM) |
342 | tctrl |= TCTRL_INIT_CSUM; | 394 | tctrl |= TCTRL_INIT_CSUM; |
343 | 395 | ||
396 | tctrl |= TCTRL_TXSCHED_PRIO; | ||
397 | |||
344 | gfar_write(&regs->tctrl, tctrl); | 398 | gfar_write(&regs->tctrl, tctrl); |
345 | 399 | ||
346 | /* Set the extraction length and index */ | 400 | /* Set the extraction length and index */ |
@@ -374,6 +428,7 @@ static const struct net_device_ops gfar_netdev_ops = { | |||
374 | .ndo_set_multicast_list = gfar_set_multi, | 428 | .ndo_set_multicast_list = gfar_set_multi, |
375 | .ndo_tx_timeout = gfar_timeout, | 429 | .ndo_tx_timeout = gfar_timeout, |
376 | .ndo_do_ioctl = gfar_ioctl, | 430 | .ndo_do_ioctl = gfar_ioctl, |
431 | .ndo_select_queue = gfar_select_queue, | ||
377 | .ndo_vlan_rx_register = gfar_vlan_rx_register, | 432 | .ndo_vlan_rx_register = gfar_vlan_rx_register, |
378 | .ndo_set_mac_address = eth_mac_addr, | 433 | .ndo_set_mac_address = eth_mac_addr, |
379 | .ndo_validate_addr = eth_validate_addr, | 434 | .ndo_validate_addr = eth_validate_addr, |
@@ -382,36 +437,131 @@ static const struct net_device_ops gfar_netdev_ops = { | |||
382 | #endif | 437 | #endif |
383 | }; | 438 | }; |
384 | 439 | ||
440 | void lock_rx_qs(struct gfar_private *priv) | ||
441 | { | ||
442 | int i = 0x0; | ||
443 | |||
444 | for (i = 0; i < priv->num_rx_queues; i++) | ||
445 | spin_lock(&priv->rx_queue[i]->rxlock); | ||
446 | } | ||
447 | |||
448 | void lock_tx_qs(struct gfar_private *priv) | ||
449 | { | ||
450 | int i = 0x0; | ||
451 | |||
452 | for (i = 0; i < priv->num_tx_queues; i++) | ||
453 | spin_lock(&priv->tx_queue[i]->txlock); | ||
454 | } | ||
455 | |||
456 | void unlock_rx_qs(struct gfar_private *priv) | ||
457 | { | ||
458 | int i = 0x0; | ||
459 | |||
460 | for (i = 0; i < priv->num_rx_queues; i++) | ||
461 | spin_unlock(&priv->rx_queue[i]->rxlock); | ||
462 | } | ||
463 | |||
464 | void unlock_tx_qs(struct gfar_private *priv) | ||
465 | { | ||
466 | int i = 0x0; | ||
467 | |||
468 | for (i = 0; i < priv->num_tx_queues; i++) | ||
469 | spin_unlock(&priv->tx_queue[i]->txlock); | ||
470 | } | ||
471 | |||
385 | /* Returns 1 if incoming frames use an FCB */ | 472 | /* Returns 1 if incoming frames use an FCB */ |
386 | static inline int gfar_uses_fcb(struct gfar_private *priv) | 473 | static inline int gfar_uses_fcb(struct gfar_private *priv) |
387 | { | 474 | { |
388 | return priv->vlgrp || priv->rx_csum_enable; | 475 | return priv->vlgrp || priv->rx_csum_enable; |
389 | } | 476 | } |
390 | 477 | ||
391 | static int gfar_of_init(struct net_device *dev) | 478 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) |
479 | { | ||
480 | return skb_get_queue_mapping(skb); | ||
481 | } | ||
482 | static void free_tx_pointers(struct gfar_private *priv) | ||
483 | { | ||
484 | int i = 0; | ||
485 | |||
486 | for (i = 0; i < priv->num_tx_queues; i++) | ||
487 | kfree(priv->tx_queue[i]); | ||
488 | } | ||
489 | |||
490 | static void free_rx_pointers(struct gfar_private *priv) | ||
491 | { | ||
492 | int i = 0; | ||
493 | |||
494 | for (i = 0; i < priv->num_rx_queues; i++) | ||
495 | kfree(priv->rx_queue[i]); | ||
496 | } | ||
497 | |||
498 | static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | ||
392 | { | 499 | { |
393 | const char *model; | 500 | const char *model; |
394 | const char *ctype; | 501 | const char *ctype; |
395 | const void *mac_addr; | 502 | const void *mac_addr; |
396 | u64 addr, size; | 503 | u64 addr, size; |
397 | int err = 0; | 504 | int err = 0, i; |
398 | struct gfar_private *priv = netdev_priv(dev); | 505 | struct net_device *dev = NULL; |
399 | struct device_node *np = priv->node; | 506 | struct gfar_private *priv = NULL; |
507 | struct device_node *np = ofdev->node; | ||
400 | const u32 *stash; | 508 | const u32 *stash; |
401 | const u32 *stash_len; | 509 | const u32 *stash_len; |
402 | const u32 *stash_idx; | 510 | const u32 *stash_idx; |
511 | unsigned int num_tx_qs, num_rx_qs; | ||
512 | u32 *tx_queues, *rx_queues; | ||
403 | 513 | ||
404 | if (!np || !of_device_is_available(np)) | 514 | if (!np || !of_device_is_available(np)) |
405 | return -ENODEV; | 515 | return -ENODEV; |
406 | 516 | ||
517 | /* parse the num of tx and rx queues */ | ||
518 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | ||
519 | num_tx_qs = tx_queues ? *tx_queues : 1; | ||
520 | |||
521 | if (num_tx_qs > MAX_TX_QS) { | ||
522 | printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | ||
523 | num_tx_qs, MAX_TX_QS); | ||
524 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | |||
528 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | ||
529 | num_rx_qs = rx_queues ? *rx_queues : 1; | ||
530 | |||
531 | if (num_rx_qs > MAX_RX_QS) { | ||
532 | printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | ||
533 | num_tx_qs, MAX_TX_QS); | ||
534 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | ||
535 | return -EINVAL; | ||
536 | } | ||
537 | |||
538 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | ||
539 | dev = *pdev; | ||
540 | if (NULL == dev) | ||
541 | return -ENOMEM; | ||
542 | |||
543 | priv = netdev_priv(dev); | ||
544 | priv->node = ofdev->node; | ||
545 | priv->ndev = dev; | ||
546 | |||
547 | dev->num_tx_queues = num_tx_qs; | ||
548 | dev->real_num_tx_queues = num_tx_qs; | ||
549 | priv->num_tx_queues = num_tx_qs; | ||
550 | priv->num_rx_queues = num_rx_qs; | ||
551 | |||
407 | /* get a pointer to the register memory */ | 552 | /* get a pointer to the register memory */ |
408 | addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); | 553 | addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); |
409 | priv->gfargrp.regs = ioremap(addr, size); | 554 | priv->gfargrp.regs = ioremap(addr, size); |
410 | 555 | ||
411 | if (priv->gfargrp.regs == NULL) | 556 | if (priv->gfargrp.regs == NULL) { |
412 | return -ENOMEM; | 557 | err = -ENOMEM; |
558 | goto err_out; | ||
559 | } | ||
413 | 560 | ||
414 | priv->gfargrp.priv = priv; /* back pointer from group to priv */ | 561 | priv->gfargrp.priv = priv; /* back pointer from group to priv */ |
562 | priv->gfargrp.rx_bit_map = DEFAULT_MAPPING; | ||
563 | priv->gfargrp.tx_bit_map = DEFAULT_MAPPING; | ||
564 | |||
415 | priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0); | 565 | priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0); |
416 | 566 | ||
417 | model = of_get_property(np, "model", NULL); | 567 | model = of_get_property(np, "model", NULL); |
@@ -430,6 +580,38 @@ static int gfar_of_init(struct net_device *dev) | |||
430 | } | 580 | } |
431 | } | 581 | } |
432 | 582 | ||
583 | for (i = 0; i < priv->num_tx_queues; i++) | ||
584 | priv->tx_queue[i] = NULL; | ||
585 | for (i = 0; i < priv->num_rx_queues; i++) | ||
586 | priv->rx_queue[i] = NULL; | ||
587 | |||
588 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
589 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( | ||
590 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | ||
591 | if (!priv->tx_queue[i]) { | ||
592 | err = -ENOMEM; | ||
593 | goto tx_alloc_failed; | ||
594 | } | ||
595 | priv->tx_queue[i]->tx_skbuff = NULL; | ||
596 | priv->tx_queue[i]->qindex = i; | ||
597 | priv->tx_queue[i]->dev = dev; | ||
598 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | ||
599 | } | ||
600 | |||
601 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
602 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( | ||
603 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | ||
604 | if (!priv->rx_queue[i]) { | ||
605 | err = -ENOMEM; | ||
606 | goto rx_alloc_failed; | ||
607 | } | ||
608 | priv->rx_queue[i]->rx_skbuff = NULL; | ||
609 | priv->rx_queue[i]->qindex = i; | ||
610 | priv->rx_queue[i]->dev = dev; | ||
611 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | ||
612 | } | ||
613 | |||
614 | |||
433 | stash = of_get_property(np, "bd-stash", NULL); | 615 | stash = of_get_property(np, "bd-stash", NULL); |
434 | 616 | ||
435 | if (stash) { | 617 | if (stash) { |
@@ -490,8 +672,13 @@ static int gfar_of_init(struct net_device *dev) | |||
490 | 672 | ||
491 | return 0; | 673 | return 0; |
492 | 674 | ||
675 | rx_alloc_failed: | ||
676 | free_rx_pointers(priv); | ||
677 | tx_alloc_failed: | ||
678 | free_tx_pointers(priv); | ||
493 | err_out: | 679 | err_out: |
494 | iounmap(priv->gfargrp.regs); | 680 | iounmap(priv->gfargrp.regs); |
681 | free_netdev(dev); | ||
495 | return err; | 682 | return err; |
496 | } | 683 | } |
497 | 684 | ||
@@ -509,6 +696,17 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
509 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | 696 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); |
510 | } | 697 | } |
511 | 698 | ||
699 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) | ||
700 | { | ||
701 | unsigned int new_bit_map = 0x0; | ||
702 | int mask = 0x1 << (max_qs - 1), i; | ||
703 | for (i = 0; i < max_qs; i++) { | ||
704 | if (bit_map & mask) | ||
705 | new_bit_map = new_bit_map + (1 << i); | ||
706 | mask = mask >> 0x1; | ||
707 | } | ||
708 | return new_bit_map; | ||
709 | } | ||
512 | /* Set up the ethernet device structure, private data, | 710 | /* Set up the ethernet device structure, private data, |
513 | * and anything else we need before we start */ | 711 | * and anything else we need before we start */ |
514 | static int gfar_probe(struct of_device *ofdev, | 712 | static int gfar_probe(struct of_device *ofdev, |
@@ -518,14 +716,14 @@ static int gfar_probe(struct of_device *ofdev, | |||
518 | struct net_device *dev = NULL; | 716 | struct net_device *dev = NULL; |
519 | struct gfar_private *priv = NULL; | 717 | struct gfar_private *priv = NULL; |
520 | struct gfar __iomem *regs = NULL; | 718 | struct gfar __iomem *regs = NULL; |
521 | int err = 0; | 719 | int err = 0, i; |
522 | int len_devname; | 720 | int len_devname; |
721 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; | ||
523 | 722 | ||
524 | /* Create an ethernet device instance */ | 723 | err = gfar_of_init(ofdev, &dev); |
525 | dev = alloc_etherdev(sizeof (*priv)); | ||
526 | 724 | ||
527 | if (NULL == dev) | 725 | if (err) |
528 | return -ENOMEM; | 726 | return err; |
529 | 727 | ||
530 | priv = netdev_priv(dev); | 728 | priv = netdev_priv(dev); |
531 | priv->ndev = dev; | 729 | priv->ndev = dev; |
@@ -533,23 +731,6 @@ static int gfar_probe(struct of_device *ofdev, | |||
533 | priv->node = ofdev->node; | 731 | priv->node = ofdev->node; |
534 | SET_NETDEV_DEV(dev, &ofdev->dev); | 732 | SET_NETDEV_DEV(dev, &ofdev->dev); |
535 | 733 | ||
536 | err = gfar_of_init(dev); | ||
537 | |||
538 | if (err) | ||
539 | goto regs_fail; | ||
540 | |||
541 | priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc( | ||
542 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | ||
543 | if (!priv->tx_queue) | ||
544 | goto regs_fail; | ||
545 | |||
546 | priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc( | ||
547 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | ||
548 | if (!priv->rx_queue) | ||
549 | goto rx_queue_fail; | ||
550 | |||
551 | spin_lock_init(&priv->tx_queue->txlock); | ||
552 | spin_lock_init(&priv->rx_queue->rxlock); | ||
553 | spin_lock_init(&priv->gfargrp.grplock); | 734 | spin_lock_init(&priv->gfargrp.grplock); |
554 | spin_lock_init(&priv->bflock); | 735 | spin_lock_init(&priv->bflock); |
555 | INIT_WORK(&priv->reset_task, gfar_reset_task); | 736 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
@@ -587,8 +768,8 @@ static int gfar_probe(struct of_device *ofdev, | |||
587 | dev->netdev_ops = &gfar_netdev_ops; | 768 | dev->netdev_ops = &gfar_netdev_ops; |
588 | dev->ethtool_ops = &gfar_ethtool_ops; | 769 | dev->ethtool_ops = &gfar_ethtool_ops; |
589 | 770 | ||
590 | /* Register for napi ...NAPI is for each rx_queue */ | 771 | /* Register for napi ...We are registering NAPI for each grp */ |
591 | netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT); | 772 | netif_napi_add(dev, &priv->gfargrp.napi, gfar_poll, GFAR_DEV_WEIGHT); |
592 | 773 | ||
593 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { | 774 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
594 | priv->rx_csum_enable = 1; | 775 | priv->rx_csum_enable = 1; |
@@ -644,17 +825,44 @@ static int gfar_probe(struct of_device *ofdev, | |||
644 | if (dev->features & NETIF_F_IP_CSUM) | 825 | if (dev->features & NETIF_F_IP_CSUM) |
645 | dev->hard_header_len += GMAC_FCB_LEN; | 826 | dev->hard_header_len += GMAC_FCB_LEN; |
646 | 827 | ||
828 | /* Need to reverse the bit maps as bit_map's MSB is q0 | ||
829 | * but, for_each_bit parses from right to left, which | ||
830 | * basically reverses the queue numbers */ | ||
831 | priv->gfargrp.tx_bit_map = reverse_bitmap(priv->gfargrp.tx_bit_map, MAX_TX_QS); | ||
832 | priv->gfargrp.rx_bit_map = reverse_bitmap(priv->gfargrp.rx_bit_map, MAX_RX_QS); | ||
833 | |||
834 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values */ | ||
835 | for_each_bit(i, &priv->gfargrp.rx_bit_map, priv->num_rx_queues) { | ||
836 | priv->gfargrp.num_rx_queues++; | ||
837 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | ||
838 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | ||
839 | } | ||
840 | for_each_bit (i, &priv->gfargrp.tx_bit_map, priv->num_tx_queues) { | ||
841 | priv->gfargrp.num_tx_queues++; | ||
842 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | ||
843 | tqueue = tqueue | (TQUEUE_EN0 >> i); | ||
844 | } | ||
845 | priv->gfargrp.rstat = rstat; | ||
846 | priv->gfargrp.tstat = tstat; | ||
847 | |||
848 | gfar_write(&regs->rqueue, rqueue); | ||
849 | gfar_write(&regs->tqueue, tqueue); | ||
850 | |||
647 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; | 851 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
648 | 852 | ||
649 | /* Initializing some of the rx/tx queue level parameters */ | 853 | /* Initializing some of the rx/tx queue level parameters */ |
650 | priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE; | 854 | for (i = 0; i < priv->num_tx_queues; i++) { |
651 | priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE; | 855 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; |
652 | priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE; | 856 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; |
653 | priv->tx_queue->txic = DEFAULT_TXIC; | 857 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; |
858 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | ||
859 | } | ||
654 | 860 | ||
655 | priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE; | 861 | for (i = 0; i < priv->num_rx_queues; i++) { |
656 | priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE; | 862 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; |
657 | priv->rx_queue->rxic = DEFAULT_RXIC; | 863 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; |
864 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | ||
865 | } | ||
658 | 866 | ||
659 | /* Enable most messages by default */ | 867 | /* Enable most messages by default */ |
660 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | 868 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
@@ -699,17 +907,19 @@ static int gfar_probe(struct of_device *ofdev, | |||
699 | /* Even more device info helps when determining which kernel */ | 907 | /* Even more device info helps when determining which kernel */ |
700 | /* provided which set of benchmarks. */ | 908 | /* provided which set of benchmarks. */ |
701 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); | 909 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
702 | printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", | 910 | for (i = 0; i < priv->num_rx_queues; i++) |
703 | dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size); | 911 | printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", |
912 | dev->name, i, priv->rx_queue[i]->rx_ring_size); | ||
913 | for(i = 0; i < priv->num_tx_queues; i++) | ||
914 | printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", | ||
915 | dev->name, i, priv->tx_queue[i]->tx_ring_size); | ||
704 | 916 | ||
705 | return 0; | 917 | return 0; |
706 | 918 | ||
707 | register_fail: | 919 | register_fail: |
708 | iounmap(priv->gfargrp.regs); | 920 | iounmap(priv->gfargrp.regs); |
709 | kfree(priv->rx_queue); | 921 | free_tx_pointers(priv); |
710 | rx_queue_fail: | 922 | free_rx_pointers(priv); |
711 | kfree(priv->tx_queue); | ||
712 | regs_fail: | ||
713 | if (priv->phy_node) | 923 | if (priv->phy_node) |
714 | of_node_put(priv->phy_node); | 924 | of_node_put(priv->phy_node); |
715 | if (priv->tbi_node) | 925 | if (priv->tbi_node) |
@@ -742,8 +952,6 @@ static int gfar_suspend(struct device *dev) | |||
742 | { | 952 | { |
743 | struct gfar_private *priv = dev_get_drvdata(dev); | 953 | struct gfar_private *priv = dev_get_drvdata(dev); |
744 | struct net_device *ndev = priv->ndev; | 954 | struct net_device *ndev = priv->ndev; |
745 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
746 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
747 | struct gfar __iomem *regs = NULL; | 955 | struct gfar __iomem *regs = NULL; |
748 | unsigned long flags; | 956 | unsigned long flags; |
749 | u32 tempval; | 957 | u32 tempval; |
@@ -752,13 +960,13 @@ static int gfar_suspend(struct device *dev) | |||
752 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | 960 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
753 | 961 | ||
754 | netif_device_detach(ndev); | 962 | netif_device_detach(ndev); |
755 | tx_queue = priv->tx_queue; | ||
756 | rx_queue = priv->rx_queue; | ||
757 | regs = priv->gfargrp.regs; | 963 | regs = priv->gfargrp.regs; |
758 | 964 | ||
759 | if (netif_running(ndev)) { | 965 | if (netif_running(ndev)) { |
760 | spin_lock_irqsave(&tx_queue->txlock, flags); | 966 | |
761 | spin_lock(&rx_queue->rxlock); | 967 | local_irq_save(flags); |
968 | lock_tx_qs(priv); | ||
969 | lock_rx_qs(priv); | ||
762 | 970 | ||
763 | gfar_halt_nodisable(ndev); | 971 | gfar_halt_nodisable(ndev); |
764 | 972 | ||
@@ -772,10 +980,11 @@ static int gfar_suspend(struct device *dev) | |||
772 | 980 | ||
773 | gfar_write(&regs->maccfg1, tempval); | 981 | gfar_write(&regs->maccfg1, tempval); |
774 | 982 | ||
775 | spin_unlock(&rx_queue->rxlock); | 983 | unlock_rx_qs(priv); |
776 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 984 | unlock_tx_qs(priv); |
985 | local_irq_restore(flags); | ||
777 | 986 | ||
778 | napi_disable(&rx_queue->napi); | 987 | napi_disable(&priv->gfargrp.napi); |
779 | 988 | ||
780 | if (magic_packet) { | 989 | if (magic_packet) { |
781 | /* Enable interrupt on Magic Packet */ | 990 | /* Enable interrupt on Magic Packet */ |
@@ -797,8 +1006,6 @@ static int gfar_resume(struct device *dev) | |||
797 | { | 1006 | { |
798 | struct gfar_private *priv = dev_get_drvdata(dev); | 1007 | struct gfar_private *priv = dev_get_drvdata(dev); |
799 | struct net_device *ndev = priv->ndev; | 1008 | struct net_device *ndev = priv->ndev; |
800 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
801 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
802 | struct gfar __iomem *regs = NULL; | 1009 | struct gfar __iomem *regs = NULL; |
803 | unsigned long flags; | 1010 | unsigned long flags; |
804 | u32 tempval; | 1011 | u32 tempval; |
@@ -816,12 +1023,11 @@ static int gfar_resume(struct device *dev) | |||
816 | /* Disable Magic Packet mode, in case something | 1023 | /* Disable Magic Packet mode, in case something |
817 | * else woke us up. | 1024 | * else woke us up. |
818 | */ | 1025 | */ |
819 | rx_queue = priv->rx_queue; | ||
820 | tx_queue = priv->tx_queue; | ||
821 | regs = priv->gfargrp.regs; | 1026 | regs = priv->gfargrp.regs; |
822 | 1027 | ||
823 | spin_lock_irqsave(&tx_queue->txlock, flags); | 1028 | local_irq_save(flags); |
824 | spin_lock(&rx_queue->rxlock); | 1029 | lock_tx_qs(priv); |
1030 | lock_rx_qs(priv); | ||
825 | 1031 | ||
826 | tempval = gfar_read(&regs->maccfg2); | 1032 | tempval = gfar_read(&regs->maccfg2); |
827 | tempval &= ~MACCFG2_MPEN; | 1033 | tempval &= ~MACCFG2_MPEN; |
@@ -829,12 +1035,13 @@ static int gfar_resume(struct device *dev) | |||
829 | 1035 | ||
830 | gfar_start(ndev); | 1036 | gfar_start(ndev); |
831 | 1037 | ||
832 | spin_unlock(&rx_queue->rxlock); | 1038 | unlock_rx_qs(priv); |
833 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 1039 | unlock_tx_qs(priv); |
1040 | local_irq_restore(flags); | ||
834 | 1041 | ||
835 | netif_device_attach(ndev); | 1042 | netif_device_attach(ndev); |
836 | 1043 | ||
837 | napi_enable(&rx_queue->napi); | 1044 | napi_enable(&priv->gfargrp.napi); |
838 | 1045 | ||
839 | return 0; | 1046 | return 0; |
840 | } | 1047 | } |
@@ -861,7 +1068,7 @@ static int gfar_restore(struct device *dev) | |||
861 | phy_start(priv->phydev); | 1068 | phy_start(priv->phydev); |
862 | 1069 | ||
863 | netif_device_attach(ndev); | 1070 | netif_device_attach(ndev); |
864 | napi_enable(&priv->napi); | 1071 | napi_enable(&priv->gfargrp.napi); |
865 | 1072 | ||
866 | return 0; | 1073 | return 0; |
867 | } | 1074 | } |
@@ -1115,23 +1322,21 @@ void gfar_halt(struct net_device *dev) | |||
1115 | void stop_gfar(struct net_device *dev) | 1322 | void stop_gfar(struct net_device *dev) |
1116 | { | 1323 | { |
1117 | struct gfar_private *priv = netdev_priv(dev); | 1324 | struct gfar_private *priv = netdev_priv(dev); |
1118 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
1119 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1120 | unsigned long flags; | 1325 | unsigned long flags; |
1121 | 1326 | ||
1122 | phy_stop(priv->phydev); | 1327 | phy_stop(priv->phydev); |
1123 | 1328 | ||
1124 | tx_queue = priv->tx_queue; | ||
1125 | rx_queue = priv->rx_queue; | ||
1126 | 1329 | ||
1127 | /* Lock it down */ | 1330 | /* Lock it down */ |
1128 | spin_lock_irqsave(&tx_queue->txlock, flags); | 1331 | local_irq_save(flags); |
1129 | spin_lock(&rx_queue->rxlock); | 1332 | lock_tx_qs(priv); |
1333 | lock_rx_qs(priv); | ||
1130 | 1334 | ||
1131 | gfar_halt(dev); | 1335 | gfar_halt(dev); |
1132 | 1336 | ||
1133 | spin_unlock(&rx_queue->rxlock); | 1337 | unlock_rx_qs(priv); |
1134 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 1338 | unlock_tx_qs(priv); |
1339 | local_irq_restore(flags); | ||
1135 | 1340 | ||
1136 | /* Free the IRQs */ | 1341 | /* Free the IRQs */ |
1137 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 1342 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
@@ -1145,24 +1350,14 @@ void stop_gfar(struct net_device *dev) | |||
1145 | free_skb_resources(priv); | 1350 | free_skb_resources(priv); |
1146 | } | 1351 | } |
1147 | 1352 | ||
1148 | /* If there are any tx skbs or rx skbs still around, free them. | 1353 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1149 | * Then free tx_skbuff and rx_skbuff */ | ||
1150 | static void free_skb_resources(struct gfar_private *priv) | ||
1151 | { | 1354 | { |
1152 | struct device *dev = &priv->ofdev->dev; | ||
1153 | struct rxbd8 *rxbdp; | ||
1154 | struct txbd8 *txbdp; | 1355 | struct txbd8 *txbdp; |
1155 | struct gfar_priv_tx_q *tx_queue = NULL; | 1356 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
1156 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1157 | int i, j; | 1357 | int i, j; |
1158 | 1358 | ||
1159 | /* Go through all the buffer descriptors and free their data buffers */ | ||
1160 | tx_queue = priv->tx_queue; | ||
1161 | txbdp = tx_queue->tx_bd_base; | 1359 | txbdp = tx_queue->tx_bd_base; |
1162 | 1360 | ||
1163 | if (!tx_queue->tx_skbuff) | ||
1164 | goto skip_tx_skbuff; | ||
1165 | |||
1166 | for (i = 0; i < tx_queue->tx_ring_size; i++) { | 1361 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1167 | if (!tx_queue->tx_skbuff[i]) | 1362 | if (!tx_queue->tx_skbuff[i]) |
1168 | continue; | 1363 | continue; |
@@ -1170,7 +1365,8 @@ static void free_skb_resources(struct gfar_private *priv) | |||
1170 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, | 1365 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
1171 | txbdp->length, DMA_TO_DEVICE); | 1366 | txbdp->length, DMA_TO_DEVICE); |
1172 | txbdp->lstatus = 0; | 1367 | txbdp->lstatus = 0; |
1173 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { | 1368 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1369 | j++) { | ||
1174 | txbdp++; | 1370 | txbdp++; |
1175 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, | 1371 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
1176 | txbdp->length, DMA_TO_DEVICE); | 1372 | txbdp->length, DMA_TO_DEVICE); |
@@ -1179,36 +1375,58 @@ static void free_skb_resources(struct gfar_private *priv) | |||
1179 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); | 1375 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1180 | tx_queue->tx_skbuff[i] = NULL; | 1376 | tx_queue->tx_skbuff[i] = NULL; |
1181 | } | 1377 | } |
1182 | |||
1183 | kfree(tx_queue->tx_skbuff); | 1378 | kfree(tx_queue->tx_skbuff); |
1184 | skip_tx_skbuff: | 1379 | } |
1185 | 1380 | ||
1186 | rx_queue = priv->rx_queue; | 1381 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1187 | rxbdp = rx_queue->rx_bd_base; | 1382 | { |
1383 | struct rxbd8 *rxbdp; | ||
1384 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | ||
1385 | int i; | ||
1188 | 1386 | ||
1189 | if (!rx_queue->rx_skbuff) | 1387 | rxbdp = rx_queue->rx_bd_base; |
1190 | goto skip_rx_skbuff; | ||
1191 | 1388 | ||
1192 | for (i = 0; i < rx_queue->rx_ring_size; i++) { | 1389 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1193 | if (rx_queue->rx_skbuff[i]) { | 1390 | if (rx_queue->rx_skbuff[i]) { |
1194 | dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, | 1391 | dma_unmap_single(&priv->ofdev->dev, |
1195 | priv->rx_buffer_size, | 1392 | rxbdp->bufPtr, priv->rx_buffer_size, |
1196 | DMA_FROM_DEVICE); | 1393 | DMA_FROM_DEVICE); |
1197 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); | 1394 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1198 | rx_queue->rx_skbuff[i] = NULL; | 1395 | rx_queue->rx_skbuff[i] = NULL; |
1199 | } | 1396 | } |
1200 | |||
1201 | rxbdp->lstatus = 0; | 1397 | rxbdp->lstatus = 0; |
1202 | rxbdp->bufPtr = 0; | 1398 | rxbdp->bufPtr = 0; |
1203 | rxbdp++; | 1399 | rxbdp++; |
1204 | } | 1400 | } |
1205 | |||
1206 | kfree(rx_queue->rx_skbuff); | 1401 | kfree(rx_queue->rx_skbuff); |
1207 | skip_rx_skbuff: | 1402 | } |
1208 | 1403 | ||
1209 | dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size + | 1404 | /* If there are any tx skbs or rx skbs still around, free them. |
1210 | sizeof(*rxbdp) * rx_queue->rx_ring_size, | 1405 | * Then free tx_skbuff and rx_skbuff */ |
1211 | tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base); | 1406 | static void free_skb_resources(struct gfar_private *priv) |
1407 | { | ||
1408 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
1409 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1410 | int i; | ||
1411 | |||
1412 | /* Go through all the buffer descriptors and free their data buffers */ | ||
1413 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
1414 | tx_queue = priv->tx_queue[i]; | ||
1415 | if(!tx_queue->tx_skbuff) | ||
1416 | free_skb_tx_queue(tx_queue); | ||
1417 | } | ||
1418 | |||
1419 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
1420 | rx_queue = priv->rx_queue[i]; | ||
1421 | if(!rx_queue->rx_skbuff) | ||
1422 | free_skb_rx_queue(rx_queue); | ||
1423 | } | ||
1424 | |||
1425 | dma_free_coherent(&priv->ofdev->dev, | ||
1426 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
1427 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
1428 | priv->tx_queue[0]->tx_bd_base, | ||
1429 | priv->tx_queue[0]->tx_bd_dma_base); | ||
1212 | } | 1430 | } |
1213 | 1431 | ||
1214 | void gfar_start(struct net_device *dev) | 1432 | void gfar_start(struct net_device *dev) |
@@ -1233,8 +1451,8 @@ void gfar_start(struct net_device *dev) | |||
1233 | gfar_write(&regs->dmactrl, tempval); | 1451 | gfar_write(&regs->dmactrl, tempval); |
1234 | 1452 | ||
1235 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | 1453 | /* Clear THLT/RHLT, so that the DMA starts polling now */ |
1236 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); | 1454 | gfar_write(&regs->tstat, priv->gfargrp.tstat); |
1237 | gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); | 1455 | gfar_write(&regs->rstat, priv->gfargrp.rstat); |
1238 | 1456 | ||
1239 | /* Unmask the interrupts we look for */ | 1457 | /* Unmask the interrupts we look for */ |
1240 | gfar_write(&regs->imask, IMASK_DEFAULT); | 1458 | gfar_write(&regs->imask, IMASK_DEFAULT); |
@@ -1329,7 +1547,7 @@ static int gfar_enet_open(struct net_device *dev) | |||
1329 | struct gfar_private *priv = netdev_priv(dev); | 1547 | struct gfar_private *priv = netdev_priv(dev); |
1330 | int err; | 1548 | int err; |
1331 | 1549 | ||
1332 | napi_enable(&priv->rx_queue->napi); | 1550 | napi_enable(&priv->gfargrp.napi); |
1333 | 1551 | ||
1334 | skb_queue_head_init(&priv->rx_recycle); | 1552 | skb_queue_head_init(&priv->rx_recycle); |
1335 | 1553 | ||
@@ -1341,17 +1559,17 @@ static int gfar_enet_open(struct net_device *dev) | |||
1341 | err = init_phy(dev); | 1559 | err = init_phy(dev); |
1342 | 1560 | ||
1343 | if (err) { | 1561 | if (err) { |
1344 | napi_disable(&priv->rx_queue->napi); | 1562 | napi_disable(&priv->gfargrp.napi); |
1345 | return err; | 1563 | return err; |
1346 | } | 1564 | } |
1347 | 1565 | ||
1348 | err = startup_gfar(dev); | 1566 | err = startup_gfar(dev); |
1349 | if (err) { | 1567 | if (err) { |
1350 | napi_disable(&priv->rx_queue->napi); | 1568 | napi_disable(&priv->gfargrp.napi); |
1351 | return err; | 1569 | return err; |
1352 | } | 1570 | } |
1353 | 1571 | ||
1354 | netif_start_queue(dev); | 1572 | netif_tx_start_all_queues(dev); |
1355 | 1573 | ||
1356 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | 1574 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
1357 | 1575 | ||
@@ -1421,16 +1639,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1421 | { | 1639 | { |
1422 | struct gfar_private *priv = netdev_priv(dev); | 1640 | struct gfar_private *priv = netdev_priv(dev); |
1423 | struct gfar_priv_tx_q *tx_queue = NULL; | 1641 | struct gfar_priv_tx_q *tx_queue = NULL; |
1642 | struct netdev_queue *txq; | ||
1424 | struct gfar __iomem *regs = NULL; | 1643 | struct gfar __iomem *regs = NULL; |
1425 | struct txfcb *fcb = NULL; | 1644 | struct txfcb *fcb = NULL; |
1426 | struct txbd8 *txbdp, *txbdp_start, *base; | 1645 | struct txbd8 *txbdp, *txbdp_start, *base; |
1427 | u32 lstatus; | 1646 | u32 lstatus; |
1428 | int i; | 1647 | int i, rq = 0; |
1429 | u32 bufaddr; | 1648 | u32 bufaddr; |
1430 | unsigned long flags; | 1649 | unsigned long flags; |
1431 | unsigned int nr_frags, length; | 1650 | unsigned int nr_frags, length; |
1432 | 1651 | ||
1433 | tx_queue = priv->tx_queue; | 1652 | |
1653 | rq = skb->queue_mapping; | ||
1654 | tx_queue = priv->tx_queue[rq]; | ||
1655 | txq = netdev_get_tx_queue(dev, rq); | ||
1434 | base = tx_queue->tx_bd_base; | 1656 | base = tx_queue->tx_bd_base; |
1435 | regs = priv->gfargrp.regs; | 1657 | regs = priv->gfargrp.regs; |
1436 | 1658 | ||
@@ -1458,7 +1680,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1458 | /* check if there is space to queue this packet */ | 1680 | /* check if there is space to queue this packet */ |
1459 | if ((nr_frags+1) > tx_queue->num_txbdfree) { | 1681 | if ((nr_frags+1) > tx_queue->num_txbdfree) { |
1460 | /* no space, stop the queue */ | 1682 | /* no space, stop the queue */ |
1461 | netif_stop_queue(dev); | 1683 | netif_tx_stop_queue(txq); |
1462 | dev->stats.tx_fifo_errors++; | 1684 | dev->stats.tx_fifo_errors++; |
1463 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 1685 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1464 | return NETDEV_TX_BUSY; | 1686 | return NETDEV_TX_BUSY; |
@@ -1550,13 +1772,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1550 | /* If the next BD still needs to be cleaned up, then the bds | 1772 | /* If the next BD still needs to be cleaned up, then the bds |
1551 | are full. We need to tell the kernel to stop sending us stuff. */ | 1773 | are full. We need to tell the kernel to stop sending us stuff. */ |
1552 | if (!tx_queue->num_txbdfree) { | 1774 | if (!tx_queue->num_txbdfree) { |
1553 | netif_stop_queue(dev); | 1775 | netif_tx_stop_queue(txq); |
1554 | 1776 | ||
1555 | dev->stats.tx_fifo_errors++; | 1777 | dev->stats.tx_fifo_errors++; |
1556 | } | 1778 | } |
1557 | 1779 | ||
1558 | /* Tell the DMA to go go go */ | 1780 | /* Tell the DMA to go go go */ |
1559 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); | 1781 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1560 | 1782 | ||
1561 | /* Unlock priv */ | 1783 | /* Unlock priv */ |
1562 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 1784 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
@@ -1569,7 +1791,7 @@ static int gfar_close(struct net_device *dev) | |||
1569 | { | 1791 | { |
1570 | struct gfar_private *priv = netdev_priv(dev); | 1792 | struct gfar_private *priv = netdev_priv(dev); |
1571 | 1793 | ||
1572 | napi_disable(&priv->rx_queue->napi); | 1794 | napi_disable(&priv->gfargrp.napi); |
1573 | 1795 | ||
1574 | skb_queue_purge(&priv->rx_recycle); | 1796 | skb_queue_purge(&priv->rx_recycle); |
1575 | cancel_work_sync(&priv->reset_task); | 1797 | cancel_work_sync(&priv->reset_task); |
@@ -1579,7 +1801,7 @@ static int gfar_close(struct net_device *dev) | |||
1579 | phy_disconnect(priv->phydev); | 1801 | phy_disconnect(priv->phydev); |
1580 | priv->phydev = NULL; | 1802 | priv->phydev = NULL; |
1581 | 1803 | ||
1582 | netif_stop_queue(dev); | 1804 | netif_tx_stop_all_queues(dev); |
1583 | 1805 | ||
1584 | return 0; | 1806 | return 0; |
1585 | } | 1807 | } |
@@ -1598,14 +1820,13 @@ static void gfar_vlan_rx_register(struct net_device *dev, | |||
1598 | struct vlan_group *grp) | 1820 | struct vlan_group *grp) |
1599 | { | 1821 | { |
1600 | struct gfar_private *priv = netdev_priv(dev); | 1822 | struct gfar_private *priv = netdev_priv(dev); |
1601 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1602 | struct gfar __iomem *regs = NULL; | 1823 | struct gfar __iomem *regs = NULL; |
1603 | unsigned long flags; | 1824 | unsigned long flags; |
1604 | u32 tempval; | 1825 | u32 tempval; |
1605 | 1826 | ||
1606 | rx_queue = priv->rx_queue; | ||
1607 | regs = priv->gfargrp.regs; | 1827 | regs = priv->gfargrp.regs; |
1608 | spin_lock_irqsave(&rx_queue->rxlock, flags); | 1828 | local_irq_save(flags); |
1829 | lock_rx_qs(priv); | ||
1609 | 1830 | ||
1610 | priv->vlgrp = grp; | 1831 | priv->vlgrp = grp; |
1611 | 1832 | ||
@@ -1639,7 +1860,8 @@ static void gfar_vlan_rx_register(struct net_device *dev, | |||
1639 | 1860 | ||
1640 | gfar_change_mtu(dev, dev->mtu); | 1861 | gfar_change_mtu(dev, dev->mtu); |
1641 | 1862 | ||
1642 | spin_unlock_irqrestore(&rx_queue->rxlock, flags); | 1863 | unlock_rx_qs(priv); |
1864 | local_irq_restore(flags); | ||
1643 | } | 1865 | } |
1644 | 1866 | ||
1645 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) | 1867 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
@@ -1711,10 +1933,10 @@ static void gfar_reset_task(struct work_struct *work) | |||
1711 | struct net_device *dev = priv->ndev; | 1933 | struct net_device *dev = priv->ndev; |
1712 | 1934 | ||
1713 | if (dev->flags & IFF_UP) { | 1935 | if (dev->flags & IFF_UP) { |
1714 | netif_stop_queue(dev); | 1936 | netif_tx_stop_all_queues(dev); |
1715 | stop_gfar(dev); | 1937 | stop_gfar(dev); |
1716 | startup_gfar(dev); | 1938 | startup_gfar(dev); |
1717 | netif_start_queue(dev); | 1939 | netif_tx_start_all_queues(dev); |
1718 | } | 1940 | } |
1719 | 1941 | ||
1720 | netif_tx_schedule_all(dev); | 1942 | netif_tx_schedule_all(dev); |
@@ -1745,7 +1967,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
1745 | int howmany = 0; | 1967 | int howmany = 0; |
1746 | u32 lstatus; | 1968 | u32 lstatus; |
1747 | 1969 | ||
1748 | rx_queue = priv->rx_queue; | 1970 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
1749 | bdp = tx_queue->dirty_tx; | 1971 | bdp = tx_queue->dirty_tx; |
1750 | skb_dirtytx = tx_queue->skb_dirtytx; | 1972 | skb_dirtytx = tx_queue->skb_dirtytx; |
1751 | 1973 | ||
@@ -1798,8 +2020,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
1798 | } | 2020 | } |
1799 | 2021 | ||
1800 | /* If we freed a buffer, we can restart transmission, if necessary */ | 2022 | /* If we freed a buffer, we can restart transmission, if necessary */ |
1801 | if (netif_queue_stopped(dev) && tx_queue->num_txbdfree) | 2023 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
1802 | netif_wake_queue(dev); | 2024 | netif_wake_subqueue(dev, tx_queue->qindex); |
1803 | 2025 | ||
1804 | /* Update dirty indicators */ | 2026 | /* Update dirty indicators */ |
1805 | tx_queue->skb_dirtytx = skb_dirtytx; | 2027 | tx_queue->skb_dirtytx = skb_dirtytx; |
@@ -1812,19 +2034,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
1812 | 2034 | ||
1813 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) | 2035 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
1814 | { | 2036 | { |
1815 | struct gfar_private *priv = gfargrp->priv; | ||
1816 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
1817 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1818 | unsigned long flags; | 2037 | unsigned long flags; |
1819 | 2038 | ||
1820 | rx_queue = priv->rx_queue; | 2039 | spin_lock_irqsave(&gfargrp->grplock, flags); |
1821 | tx_queue = priv->tx_queue; | 2040 | if (napi_schedule_prep(&gfargrp->napi)) { |
1822 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
1823 | spin_lock(&rx_queue->rxlock); | ||
1824 | |||
1825 | if (napi_schedule_prep(&rx_queue->napi)) { | ||
1826 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); | 2041 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); |
1827 | __napi_schedule(&rx_queue->napi); | 2042 | __napi_schedule(&gfargrp->napi); |
1828 | } else { | 2043 | } else { |
1829 | /* | 2044 | /* |
1830 | * Clear IEVENT, so interrupts aren't called again | 2045 | * Clear IEVENT, so interrupts aren't called again |
@@ -1832,9 +2047,8 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) | |||
1832 | */ | 2047 | */ |
1833 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); | 2048 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
1834 | } | 2049 | } |
2050 | spin_unlock_irqrestore(&gfargrp->grplock, flags); | ||
1835 | 2051 | ||
1836 | spin_unlock(&rx_queue->rxlock); | ||
1837 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
1838 | } | 2052 | } |
1839 | 2053 | ||
1840 | /* Interrupt Handler for Transmit complete */ | 2054 | /* Interrupt Handler for Transmit complete */ |
@@ -1952,6 +2166,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
1952 | fcb = (struct rxfcb *)skb->data; | 2166 | fcb = (struct rxfcb *)skb->data; |
1953 | 2167 | ||
1954 | /* Remove the FCB from the skb */ | 2168 | /* Remove the FCB from the skb */ |
2169 | skb_set_queue_mapping(skb, fcb->rq); | ||
1955 | /* Remove the padded bytes, if there are any */ | 2170 | /* Remove the padded bytes, if there are any */ |
1956 | if (amount_pull) | 2171 | if (amount_pull) |
1957 | skb_pull(skb, amount_pull); | 2172 | skb_pull(skb, amount_pull); |
@@ -2072,28 +2287,54 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
2072 | 2287 | ||
2073 | static int gfar_poll(struct napi_struct *napi, int budget) | 2288 | static int gfar_poll(struct napi_struct *napi, int budget) |
2074 | { | 2289 | { |
2075 | struct gfar_priv_rx_q *rx_queue = container_of(napi, | 2290 | struct gfar_priv_grp *gfargrp = container_of(napi, |
2076 | struct gfar_priv_rx_q, napi); | 2291 | struct gfar_priv_grp, napi); |
2077 | struct net_device *dev = rx_queue->dev; | 2292 | struct gfar_private *priv = gfargrp->priv; |
2078 | struct gfar_private *priv = netdev_priv(dev); | ||
2079 | struct gfar __iomem *regs = priv->gfargrp.regs; | 2293 | struct gfar __iomem *regs = priv->gfargrp.regs; |
2080 | struct gfar_priv_tx_q *tx_queue = NULL; | 2294 | struct gfar_priv_tx_q *tx_queue = NULL; |
2081 | int tx_cleaned = 0; | 2295 | struct gfar_priv_rx_q *rx_queue = NULL; |
2082 | int rx_cleaned = 0; | 2296 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; |
2297 | int tx_cleaned = 0, i, left_over_budget = budget, serviced_queues = 0; | ||
2298 | int num_queues = 0; | ||
2083 | unsigned long flags; | 2299 | unsigned long flags; |
2084 | 2300 | ||
2301 | num_queues = gfargrp->num_rx_queues; | ||
2302 | budget_per_queue = budget/num_queues; | ||
2303 | |||
2085 | /* Clear IEVENT, so interrupts aren't called again | 2304 | /* Clear IEVENT, so interrupts aren't called again |
2086 | * because of the packets that have already arrived */ | 2305 | * because of the packets that have already arrived */ |
2087 | gfar_write(®s->ievent, IEVENT_RTX_MASK); | 2306 | gfar_write(®s->ievent, IEVENT_RTX_MASK); |
2088 | tx_queue = priv->tx_queue; | ||
2089 | 2307 | ||
2090 | /* If we fail to get the lock, don't bother with the TX BDs */ | 2308 | while (num_queues && left_over_budget) { |
2091 | if (spin_trylock_irqsave(&tx_queue->txlock, flags)) { | ||
2092 | tx_cleaned = gfar_clean_tx_ring(tx_queue); | ||
2093 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
2094 | } | ||
2095 | 2309 | ||
2096 | rx_cleaned = gfar_clean_rx_ring(rx_queue, budget); | 2310 | budget_per_queue = left_over_budget/num_queues; |
2311 | left_over_budget = 0; | ||
2312 | |||
2313 | for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { | ||
2314 | if (test_bit(i, &serviced_queues)) | ||
2315 | continue; | ||
2316 | rx_queue = priv->rx_queue[i]; | ||
2317 | tx_queue = priv->tx_queue[rx_queue->qindex]; | ||
2318 | |||
2319 | /* If we fail to get the lock, | ||
2320 | * don't bother with the TX BDs */ | ||
2321 | if (spin_trylock_irqsave(&tx_queue->txlock, flags)) { | ||
2322 | tx_cleaned += gfar_clean_tx_ring(tx_queue); | ||
2323 | spin_unlock_irqrestore(&tx_queue->txlock, | ||
2324 | flags); | ||
2325 | } | ||
2326 | |||
2327 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, | ||
2328 | budget_per_queue); | ||
2329 | rx_cleaned += rx_cleaned_per_queue; | ||
2330 | if(rx_cleaned_per_queue < budget_per_queue) { | ||
2331 | left_over_budget = left_over_budget + | ||
2332 | (budget_per_queue - rx_cleaned_per_queue); | ||
2333 | set_bit(i, &serviced_queues); | ||
2334 | num_queues--; | ||
2335 | } | ||
2336 | } | ||
2337 | } | ||
2097 | 2338 | ||
2098 | if (tx_cleaned) | 2339 | if (tx_cleaned) |
2099 | return budget; | 2340 | return budget; |
@@ -2102,7 +2343,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
2102 | napi_complete(napi); | 2343 | napi_complete(napi); |
2103 | 2344 | ||
2104 | /* Clear the halt bit in RSTAT */ | 2345 | /* Clear the halt bit in RSTAT */ |
2105 | gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); | 2346 | gfar_write(&regs->rstat, gfargrp->rstat); |
2106 | 2347 | ||
2107 | gfar_write(&regs->imask, IMASK_DEFAULT); | 2348 | gfar_write(&regs->imask, IMASK_DEFAULT); |
2108 | 2349 | ||
@@ -2180,14 +2421,14 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) | |||
2180 | static void adjust_link(struct net_device *dev) | 2421 | static void adjust_link(struct net_device *dev) |
2181 | { | 2422 | { |
2182 | struct gfar_private *priv = netdev_priv(dev); | 2423 | struct gfar_private *priv = netdev_priv(dev); |
2183 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
2184 | struct gfar __iomem *regs = priv->gfargrp.regs; | 2424 | struct gfar __iomem *regs = priv->gfargrp.regs; |
2185 | unsigned long flags; | 2425 | unsigned long flags; |
2186 | struct phy_device *phydev = priv->phydev; | 2426 | struct phy_device *phydev = priv->phydev; |
2187 | int new_state = 0; | 2427 | int new_state = 0; |
2188 | 2428 | ||
2189 | tx_queue = priv->tx_queue; | 2429 | local_irq_save(flags); |
2190 | spin_lock_irqsave(&tx_queue->txlock, flags); | 2430 | lock_tx_qs(priv); |
2431 | |||
2191 | if (phydev->link) { | 2432 | if (phydev->link) { |
2192 | u32 tempval = gfar_read(®s->maccfg2); | 2433 | u32 tempval = gfar_read(®s->maccfg2); |
2193 | u32 ecntrl = gfar_read(&regs->ecntrl); | 2434 | u32 ecntrl = gfar_read(&regs->ecntrl); |
@@ -2252,8 +2493,8 @@ static void adjust_link(struct net_device *dev) | |||
2252 | 2493 | ||
2253 | if (new_state && netif_msg_link(priv)) | 2494 | if (new_state && netif_msg_link(priv)) |
2254 | phy_print_status(phydev); | 2495 | phy_print_status(phydev); |
2255 | 2496 | unlock_tx_qs(priv); | |
2256 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 2497 | local_irq_restore(flags); |
2257 | } | 2498 | } |
2258 | 2499 | ||
2259 | /* Update the hash table based on the current list of multicast | 2500 | /* Update the hash table based on the current list of multicast |
@@ -2457,7 +2698,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id) | |||
2457 | priv->extra_stats.tx_underrun++; | 2698 | priv->extra_stats.tx_underrun++; |
2458 | 2699 | ||
2459 | /* Reactivate the Tx Queues */ | 2700 | /* Reactivate the Tx Queues */ |
2460 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); | 2701 | gfar_write(&regs->tstat, gfargrp->tstat); |
2461 | } | 2702 | } |
2462 | if (netif_msg_tx_err(priv)) | 2703 | if (netif_msg_tx_err(priv)) |
2463 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | 2704 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); |