-rw-r--r--  drivers/net/gianfar.c          | 695
-rw-r--r--  drivers/net/gianfar.h          |  96
-rw-r--r--  drivers/net/gianfar_ethtool.c  |  70
-rw-r--r--  drivers/net/gianfar_sysfs.c    |  50
4 files changed, 621 insertions(+), 290 deletions(-)
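
For orientation, here is a minimal compilable sketch (not part of the patch) of the central data-structure change made to struct gfar_private; the _before/_after names are illustrative, and the real hunks are in the gianfar.h diff below.

#define MAX_TX_QS 0x8
#define MAX_RX_QS 0x8

struct gfar_priv_tx_q;	/* left opaque here; defined in gianfar.h */
struct gfar_priv_rx_q;

/* before: one TX ring and one RX ring, with NAPI hanging off the RX queue */
struct gfar_private_before {
	struct gfar_priv_tx_q *tx_queue;
	struct gfar_priv_rx_q *rx_queue;
};

/* after: arrays of per-queue structures sized by the new MAX_*_QS limits,
 * with NAPI moved into the per-group structure (struct gfar_priv_grp) */
struct gfar_private_after {
	unsigned int num_tx_queues;	/* parsed from the device tree */
	unsigned int num_rx_queues;
	struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
	struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
};
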
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index fa0188ea923..aa258e89926 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,6 +143,7 @@ void gfar_start(struct net_device *dev);
143static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
146 147
147MODULE_AUTHOR("Freescale Semiconductor, Inc"); 148MODULE_AUTHOR("Freescale Semiconductor, Inc");
148MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -171,71 +172,89 @@ static int gfar_init_bds(struct net_device *ndev)
171 struct gfar_priv_rx_q *rx_queue = NULL; 172 struct gfar_priv_rx_q *rx_queue = NULL;
172 struct txbd8 *txbdp; 173 struct txbd8 *txbdp;
173 struct rxbd8 *rxbdp; 174 struct rxbd8 *rxbdp;
174 int i; 175 int i, j;
175
176 tx_queue = priv->tx_queue;
177 rx_queue = priv->rx_queue;
178 176
179 /* Initialize some variables in our dev structure */ 177 for (i = 0; i < priv->num_tx_queues; i++) {
180 tx_queue->num_txbdfree = tx_queue->tx_ring_size; 178 tx_queue = priv->tx_queue[i];
181 tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base; 179 /* Initialize some variables in our dev structure */
182 rx_queue->cur_rx = rx_queue->rx_bd_base; 180 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
183 tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0; 181 tx_queue->dirty_tx = tx_queue->tx_bd_base;
184 rx_queue->skb_currx = 0; 182 tx_queue->cur_tx = tx_queue->tx_bd_base;
183 tx_queue->skb_curtx = 0;
184 tx_queue->skb_dirtytx = 0;
185
186 /* Initialize Transmit Descriptor Ring */
187 txbdp = tx_queue->tx_bd_base;
188 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 }
185 193
186 /* Initialize Transmit Descriptor Ring */ 194 /* Set the last descriptor in the ring to indicate wrap */
187 txbdp = tx_queue->tx_bd_base; 195 txbdp--;
188 for (i = 0; i < tx_queue->tx_ring_size; i++) { 196 txbdp->status |= TXBD_WRAP;
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 } 197 }
193 198
194 /* Set the last descriptor in the ring to indicate wrap */ 199 for (i = 0; i < priv->num_rx_queues; i++) {
195 txbdp--; 200 rx_queue = priv->rx_queue[i];
196 txbdp->status |= TXBD_WRAP; 201 rx_queue->cur_rx = rx_queue->rx_bd_base;
202 rx_queue->skb_currx = 0;
203 rxbdp = rx_queue->rx_bd_base;
197 204
198 rxbdp = rx_queue->rx_bd_base; 205 for (j = 0; j < rx_queue->rx_ring_size; j++) {
199 for (i = 0; i < rx_queue->rx_ring_size; i++) { 206 struct sk_buff *skb = rx_queue->rx_skbuff[j];
200 struct sk_buff *skb = rx_queue->rx_skbuff[i];
201 207
202 if (skb) { 208 if (skb) {
203 gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr); 209 gfar_init_rxbdp(rx_queue, rxbdp,
204 } else { 210 rxbdp->bufPtr);
205 skb = gfar_new_skb(ndev); 211 } else {
206 if (!skb) { 212 skb = gfar_new_skb(ndev);
207 pr_err("%s: Can't allocate RX buffers\n", 213 if (!skb) {
208 ndev->name); 214 pr_err("%s: Can't allocate RX buffers\n",
209 return -ENOMEM; 215 ndev->name);
216 goto err_rxalloc_fail;
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
210 } 221 }
211 rx_queue->rx_skbuff[i] = skb;
212 222
213 gfar_new_rxbdp(rx_queue, rxbdp, skb); 223 rxbdp++;
214 } 224 }
215 225
216 rxbdp++;
217 } 226 }
218 227
219 return 0; 228 return 0;
229
230err_rxalloc_fail:
231 free_skb_resources(priv);
232 return -ENOMEM;
220} 233}
221 234
222static int gfar_alloc_skb_resources(struct net_device *ndev) 235static int gfar_alloc_skb_resources(struct net_device *ndev)
223{ 236{
224 void *vaddr; 237 void *vaddr;
225 int i; 238 dma_addr_t addr;
239 int i, j, k;
226 struct gfar_private *priv = netdev_priv(ndev); 240 struct gfar_private *priv = netdev_priv(ndev);
227 struct device *dev = &priv->ofdev->dev; 241 struct device *dev = &priv->ofdev->dev;
228 struct gfar_priv_tx_q *tx_queue = NULL; 242 struct gfar_priv_tx_q *tx_queue = NULL;
229 struct gfar_priv_rx_q *rx_queue = NULL; 243 struct gfar_priv_rx_q *rx_queue = NULL;
230 244
231 tx_queue = priv->tx_queue; 245 priv->total_tx_ring_size = 0;
232 rx_queue = priv->rx_queue; 246 for (i = 0; i < priv->num_tx_queues; i++)
247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249 priv->total_rx_ring_size = 0;
250 for (i = 0; i < priv->num_rx_queues; i++)
251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
233 252
234 /* Allocate memory for the buffer descriptors */ 253 /* Allocate memory for the buffer descriptors */
235 vaddr = dma_alloc_coherent(dev, 254 vaddr = dma_alloc_coherent(dev,
236 sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size + 255 sizeof(struct txbd8) * priv->total_tx_ring_size +
237 sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size, 256 sizeof(struct rxbd8) * priv->total_rx_ring_size,
238 &tx_queue->tx_bd_dma_base, GFP_KERNEL); 257 &addr, GFP_KERNEL);
239 if (!vaddr) { 258 if (!vaddr) {
240 if (netif_msg_ifup(priv)) 259 if (netif_msg_ifup(priv))
241 pr_err("%s: Could not allocate buffer descriptors!\n", 260 pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -243,38 +262,57 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
243 return -ENOMEM; 262 return -ENOMEM;
244 } 263 }
245 264
246 tx_queue->tx_bd_base = vaddr; 265 for (i = 0; i < priv->num_tx_queues; i++) {
247 tx_queue->dev = ndev; 266 tx_queue = priv->tx_queue[i];
267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268 tx_queue->tx_bd_dma_base = addr;
269 tx_queue->dev = ndev;
270 /* enet DMA only understands physical addresses */
271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 }
248 274
249 /* Start the rx descriptor ring where the tx ring leaves off */ 275 /* Start the rx descriptor ring where the tx ring leaves off */
250 vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size; 276 for (i = 0; i < priv->num_rx_queues; i++) {
251 rx_queue->rx_bd_base = vaddr; 277 rx_queue = priv->rx_queue[i];
252 rx_queue->dev = ndev; 278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279 rx_queue->rx_bd_dma_base = addr;
280 rx_queue->dev = ndev;
281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 }
253 284
254 /* Setup the skbuff rings */ 285 /* Setup the skbuff rings */
255 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * 286 for (i = 0; i < priv->num_tx_queues; i++) {
287 tx_queue = priv->tx_queue[i];
288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
256 tx_queue->tx_ring_size, GFP_KERNEL); 289 tx_queue->tx_ring_size, GFP_KERNEL);
257 if (!tx_queue->tx_skbuff) { 290 if (!tx_queue->tx_skbuff) {
258 if (netif_msg_ifup(priv)) 291 if (netif_msg_ifup(priv))
259 pr_err("%s: Could not allocate tx_skbuff\n", 292 pr_err("%s: Could not allocate tx_skbuff\n",
260 ndev->name); 293 ndev->name);
261 goto cleanup; 294 goto cleanup;
262 } 295 }
263 296
264 for (i = 0; i < tx_queue->tx_ring_size; i++) 297 for (k = 0; k < tx_queue->tx_ring_size; k++)
265 tx_queue->tx_skbuff[i] = NULL; 298 tx_queue->tx_skbuff[k] = NULL;
299 }
266 300
267 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * 301 for (i = 0; i < priv->num_rx_queues; i++) {
302 rx_queue = priv->rx_queue[i];
303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
268 rx_queue->rx_ring_size, GFP_KERNEL); 304 rx_queue->rx_ring_size, GFP_KERNEL);
269 if (!rx_queue->rx_skbuff) {
270 if (netif_msg_ifup(priv))
271 pr_err("%s: Could not allocate rx_skbuff\n",
272 ndev->name);
273 goto cleanup;
274 }
275 305
276 for (i = 0; i < rx_queue->rx_ring_size; i++) 306 if (!rx_queue->rx_skbuff) {
277 rx_queue->rx_skbuff[i] = NULL; 307 if (netif_msg_ifup(priv))
308 pr_err("%s: Could not allocate rx_skbuff\n",
309 ndev->name);
310 goto cleanup;
311 }
312
313 for (j = 0; j < rx_queue->rx_ring_size; j++)
314 rx_queue->rx_skbuff[j] = NULL;
315 }
278 316
279 if (gfar_init_bds(ndev)) 317 if (gfar_init_bds(ndev))
280 goto cleanup; 318 goto cleanup;
@@ -286,33 +324,47 @@ cleanup:
286 return -ENOMEM; 324 return -ENOMEM;
287} 325}
288 326
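
The allocation above hands out a single dma_alloc_coherent() region to every ring by advancing the CPU pointer and the bus address in lockstep, TX rings first and RX rings where they leave off. A stand-alone sketch of that layout logic, with illustrative sizes (NUM_TX, NUM_RX, RING_SZ) and a fake bus address standing in for the DMA API:

#include <stdint.h>
#include <stdio.h>

struct txbd8 { uint32_t lstatus; uint32_t bufPtr; };
struct rxbd8 { uint32_t lstatus; uint32_t bufPtr; };

#define NUM_TX	2
#define NUM_RX	2
#define RING_SZ	4

int main(void)
{
	/* stands in for the single coherent block returned by dma_alloc_coherent() */
	static unsigned char pool[(NUM_TX + NUM_RX) * RING_SZ * sizeof(struct txbd8)];
	unsigned char *vaddr = pool;		/* CPU view of the block */
	unsigned long addr = 0x10000000UL;	/* pretend bus address */
	int i;

	for (i = 0; i < NUM_TX; i++) {
		printf("tx ring %d: cpu=%p bus=0x%lx\n", i, (void *)vaddr, addr);
		vaddr += sizeof(struct txbd8) * RING_SZ;
		addr  += sizeof(struct txbd8) * RING_SZ;
	}
	/* RX rings start where the TX rings leave off, as in the driver */
	for (i = 0; i < NUM_RX; i++) {
		printf("rx ring %d: cpu=%p bus=0x%lx\n", i, (void *)vaddr, addr);
		vaddr += sizeof(struct rxbd8) * RING_SZ;
		addr  += sizeof(struct rxbd8) * RING_SZ;
	}
	return 0;
}
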
327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{
329 struct gfar __iomem *regs = priv->gfargrp.regs;
330 u32 *baddr;
331 int i;
332
333 baddr = &regs->tbase0;
334 for(i = 0; i < priv->num_tx_queues; i++) {
335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336 baddr += 2;
337 }
338
339 baddr = &regs->rbase0;
340 for(i = 0; i < priv->num_rx_queues; i++) {
341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342 baddr += 2;
343 }
344}
345
289static void gfar_init_mac(struct net_device *ndev) 346static void gfar_init_mac(struct net_device *ndev)
290{ 347{
291 struct gfar_private *priv = netdev_priv(ndev); 348 struct gfar_private *priv = netdev_priv(ndev);
292 struct gfar_priv_tx_q *tx_queue = NULL;
293 struct gfar_priv_rx_q *rx_queue = NULL;
294 struct gfar __iomem *regs = priv->gfargrp.regs; 349 struct gfar __iomem *regs = priv->gfargrp.regs;
295 u32 rctrl = 0; 350 u32 rctrl = 0;
296 u32 tctrl = 0; 351 u32 tctrl = 0;
297 u32 attrs = 0; 352 u32 attrs = 0;
298 353
299 tx_queue = priv->tx_queue; 354 /* write the tx/rx base registers */
300 rx_queue = priv->rx_queue; 355 gfar_init_tx_rx_base(priv);
301
302 /* enet DMA only understands physical addresses */
303 gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
304 gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
305 sizeof(*tx_queue->tx_bd_base) *
306 tx_queue->tx_ring_size);
307 356
308 /* Configure the coalescing support */ 357 /* Configure the coalescing support */
309 gfar_write(&regs->txic, 0); 358 gfar_write(&regs->txic, 0);
310 if (tx_queue->txcoalescing) 359 if (priv->tx_queue[0]->txcoalescing)
311 gfar_write(&regs->txic, tx_queue->txic); 360 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
312 361
313 gfar_write(&regs->rxic, 0); 362 gfar_write(&regs->rxic, 0);
314 if (rx_queue->rxcoalescing) 363 if (priv->rx_queue[0]->rxcoalescing)
315 gfar_write(&regs->rxic, rx_queue->rxic); 364 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
365
366 if (priv->rx_filer_enable)
367 rctrl |= RCTRL_FILREN;
316 368
317 if (priv->rx_csum_enable) 369 if (priv->rx_csum_enable)
318 rctrl |= RCTRL_CHECKSUMMING; 370 rctrl |= RCTRL_CHECKSUMMING;
@@ -341,6 +393,8 @@ static void gfar_init_mac(struct net_device *ndev)
341 if (ndev->features & NETIF_F_IP_CSUM) 393 if (ndev->features & NETIF_F_IP_CSUM)
342 tctrl |= TCTRL_INIT_CSUM; 394 tctrl |= TCTRL_INIT_CSUM;
343 395
396 tctrl |= TCTRL_TXSCHED_PRIO;
397
344 gfar_write(&regs->tctrl, tctrl); 398 gfar_write(&regs->tctrl, tctrl);
345 399
346 /* Set the extraction length and index */ 400 /* Set the extraction length and index */
@@ -374,6 +428,7 @@ static const struct net_device_ops gfar_netdev_ops = {
374 .ndo_set_multicast_list = gfar_set_multi, 428 .ndo_set_multicast_list = gfar_set_multi,
375 .ndo_tx_timeout = gfar_timeout, 429 .ndo_tx_timeout = gfar_timeout,
376 .ndo_do_ioctl = gfar_ioctl, 430 .ndo_do_ioctl = gfar_ioctl,
431 .ndo_select_queue = gfar_select_queue,
377 .ndo_vlan_rx_register = gfar_vlan_rx_register, 432 .ndo_vlan_rx_register = gfar_vlan_rx_register,
378 .ndo_set_mac_address = eth_mac_addr, 433 .ndo_set_mac_address = eth_mac_addr,
379 .ndo_validate_addr = eth_validate_addr, 434 .ndo_validate_addr = eth_validate_addr,
@@ -382,36 +437,131 @@ static const struct net_device_ops gfar_netdev_ops = {
382#endif 437#endif
383}; 438};
384 439
440void lock_rx_qs(struct gfar_private *priv)
441{
442 int i = 0x0;
443
444 for (i = 0; i < priv->num_rx_queues; i++)
445 spin_lock(&priv->rx_queue[i]->rxlock);
446}
447
448void lock_tx_qs(struct gfar_private *priv)
449{
450 int i = 0x0;
451
452 for (i = 0; i < priv->num_tx_queues; i++)
453 spin_lock(&priv->tx_queue[i]->txlock);
454}
455
456void unlock_rx_qs(struct gfar_private *priv)
457{
458 int i = 0x0;
459
460 for (i = 0; i < priv->num_rx_queues; i++)
461 spin_unlock(&priv->rx_queue[i]->rxlock);
462}
463
464void unlock_tx_qs(struct gfar_private *priv)
465{
466 int i = 0x0;
467
468 for (i = 0; i < priv->num_tx_queues; i++)
469 spin_unlock(&priv->tx_queue[i]->txlock);
470}
471
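
A user-space analogue of these helpers (POSIX spinlocks standing in for the kernel ones, queue count illustrative): the point is that every caller takes the per-queue locks in the same ascending index order, so nesting lock_tx_qs()/lock_rx_qs() under local_irq_save() stays deadlock-free.

#include <pthread.h>
#include <stdio.h>

#define NUM_Q 4
static pthread_spinlock_t qlock[NUM_Q];

static void lock_all_qs(void)
{
	int i;

	for (i = 0; i < NUM_Q; i++)	/* always the same ascending order */
		pthread_spin_lock(&qlock[i]);
}

static void unlock_all_qs(void)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		pthread_spin_unlock(&qlock[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		pthread_spin_init(&qlock[i], PTHREAD_PROCESS_PRIVATE);

	lock_all_qs();
	puts("all queue locks held: device-wide state may be changed");
	unlock_all_qs();

	for (i = 0; i < NUM_Q; i++)
		pthread_spin_destroy(&qlock[i]);
	return 0;
}
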
385/* Returns 1 if incoming frames use an FCB */ 472/* Returns 1 if incoming frames use an FCB */
386static inline int gfar_uses_fcb(struct gfar_private *priv) 473static inline int gfar_uses_fcb(struct gfar_private *priv)
387{ 474{
388 return priv->vlgrp || priv->rx_csum_enable; 475 return priv->vlgrp || priv->rx_csum_enable;
389} 476}
390 477
391static int gfar_of_init(struct net_device *dev) 478u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
479{
480 return skb_get_queue_mapping(skb);
481}
482static void free_tx_pointers(struct gfar_private *priv)
483{
484 int i = 0;
485
486 for (i = 0; i < priv->num_tx_queues; i++)
487 kfree(priv->tx_queue[i]);
488}
489
490static void free_rx_pointers(struct gfar_private *priv)
491{
492 int i = 0;
493
494 for (i = 0; i < priv->num_rx_queues; i++)
495 kfree(priv->rx_queue[i]);
496}
497
498static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
392{ 499{
393 const char *model; 500 const char *model;
394 const char *ctype; 501 const char *ctype;
395 const void *mac_addr; 502 const void *mac_addr;
396 u64 addr, size; 503 u64 addr, size;
397 int err = 0; 504 int err = 0, i;
398 struct gfar_private *priv = netdev_priv(dev); 505 struct net_device *dev = NULL;
399 struct device_node *np = priv->node; 506 struct gfar_private *priv = NULL;
507 struct device_node *np = ofdev->node;
400 const u32 *stash; 508 const u32 *stash;
401 const u32 *stash_len; 509 const u32 *stash_len;
402 const u32 *stash_idx; 510 const u32 *stash_idx;
511 unsigned int num_tx_qs, num_rx_qs;
512 u32 *tx_queues, *rx_queues;
403 513
404 if (!np || !of_device_is_available(np)) 514 if (!np || !of_device_is_available(np))
405 return -ENODEV; 515 return -ENODEV;
406 516
517 /* parse the num of tx and rx queues */
518 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
519 num_tx_qs = tx_queues ? *tx_queues : 1;
520
521 if (num_tx_qs > MAX_TX_QS) {
522 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
523 num_tx_qs, MAX_TX_QS);
524 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
525 return -EINVAL;
526 }
527
528 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
529 num_rx_qs = rx_queues ? *rx_queues : 1;
530
531 if (num_rx_qs > MAX_RX_QS) {
532 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
533 num_rx_qs, MAX_RX_QS);
534 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
535 return -EINVAL;
536 }
537
538 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
539 dev = *pdev;
540 if (NULL == dev)
541 return -ENOMEM;
542
543 priv = netdev_priv(dev);
544 priv->node = ofdev->node;
545 priv->ndev = dev;
546
547 dev->num_tx_queues = num_tx_qs;
548 dev->real_num_tx_queues = num_tx_qs;
549 priv->num_tx_queues = num_tx_qs;
550 priv->num_rx_queues = num_rx_qs;
551
407 /* get a pointer to the register memory */ 552 /* get a pointer to the register memory */
408 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 553 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
409 priv->gfargrp.regs = ioremap(addr, size); 554 priv->gfargrp.regs = ioremap(addr, size);
410 555
411 if (priv->gfargrp.regs == NULL) 556 if (priv->gfargrp.regs == NULL) {
412 return -ENOMEM; 557 err = -ENOMEM;
558 goto err_out;
559 }
413 560
414 priv->gfargrp.priv = priv; /* back pointer from group to priv */ 561 priv->gfargrp.priv = priv; /* back pointer from group to priv */
562 priv->gfargrp.rx_bit_map = DEFAULT_MAPPING;
563 priv->gfargrp.tx_bit_map = DEFAULT_MAPPING;
564
415 priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0); 565 priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);
416 566
417 model = of_get_property(np, "model", NULL); 567 model = of_get_property(np, "model", NULL);
@@ -430,6 +580,38 @@ static int gfar_of_init(struct net_device *dev)
430 } 580 }
431 } 581 }
432 582
583 for (i = 0; i < priv->num_tx_queues; i++)
584 priv->tx_queue[i] = NULL;
585 for (i = 0; i < priv->num_rx_queues; i++)
586 priv->rx_queue[i] = NULL;
587
588 for (i = 0; i < priv->num_tx_queues; i++) {
589 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
590 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
591 if (!priv->tx_queue[i]) {
592 err = -ENOMEM;
593 goto tx_alloc_failed;
594 }
595 priv->tx_queue[i]->tx_skbuff = NULL;
596 priv->tx_queue[i]->qindex = i;
597 priv->tx_queue[i]->dev = dev;
598 spin_lock_init(&(priv->tx_queue[i]->txlock));
599 }
600
601 for (i = 0; i < priv->num_rx_queues; i++) {
602 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
603 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
604 if (!priv->rx_queue[i]) {
605 err = -ENOMEM;
606 goto rx_alloc_failed;
607 }
608 priv->rx_queue[i]->rx_skbuff = NULL;
609 priv->rx_queue[i]->qindex = i;
610 priv->rx_queue[i]->dev = dev;
611 spin_lock_init(&(priv->rx_queue[i]->rxlock));
612 }
613
614
433 stash = of_get_property(np, "bd-stash", NULL); 615 stash = of_get_property(np, "bd-stash", NULL);
434 616
435 if (stash) { 617 if (stash) {
@@ -490,8 +672,13 @@ static int gfar_of_init(struct net_device *dev)
490 672
491 return 0; 673 return 0;
492 674
675rx_alloc_failed:
676 free_rx_pointers(priv);
677tx_alloc_failed:
678 free_tx_pointers(priv);
493err_out: 679err_out:
494 iounmap(priv->gfargrp.regs); 680 iounmap(priv->gfargrp.regs);
681 free_netdev(dev);
495 return err; 682 return err;
496} 683}
497 684
@@ -509,6 +696,17 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
509 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 696 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
510} 697}
511 698
699static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
700{
701 unsigned int new_bit_map = 0x0;
702 int mask = 0x1 << (max_qs - 1), i;
703 for (i = 0; i < max_qs; i++) {
704 if (bit_map & mask)
705 new_bit_map = new_bit_map + (1 << i);
706 mask = mask >> 0x1;
707 }
708 return new_bit_map;
709}
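
A stand-alone check of what this helper does (test values illustrative): the hardware's queue bit maps put queue 0 at the most significant of the eight bits, while for_each_bit() walks from bit 0 upward, so the map is mirrored to make bit i mean queue i.

#include <stdio.h>

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0;
	unsigned int mask = 1u << (max_qs - 1);
	unsigned int i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;
		mask >>= 1;
	}
	return new_bit_map;
}

int main(void)
{
	/* 0xFF (all eight queues) is symmetric; 0x80 (queue 0 only) becomes 0x01 */
	printf("0xFF -> 0x%02X\n", reverse_bitmap(0xFF, 8));
	printf("0x80 -> 0x%02X\n", reverse_bitmap(0x80, 8));
	printf("0xC0 -> 0x%02X\n", reverse_bitmap(0xC0, 8));
	return 0;
}
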
512/* Set up the ethernet device structure, private data, 710/* Set up the ethernet device structure, private data,
513 * and anything else we need before we start */ 711 * and anything else we need before we start */
514static int gfar_probe(struct of_device *ofdev, 712static int gfar_probe(struct of_device *ofdev,
@@ -518,14 +716,14 @@ static int gfar_probe(struct of_device *ofdev,
518 struct net_device *dev = NULL; 716 struct net_device *dev = NULL;
519 struct gfar_private *priv = NULL; 717 struct gfar_private *priv = NULL;
520 struct gfar __iomem *regs = NULL; 718 struct gfar __iomem *regs = NULL;
521 int err = 0; 719 int err = 0, i;
522 int len_devname; 720 int len_devname;
721 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
523 722
524 /* Create an ethernet device instance */ 723 err = gfar_of_init(ofdev, &dev);
525 dev = alloc_etherdev(sizeof (*priv));
526 724
527 if (NULL == dev) 725 if (err)
528 return -ENOMEM; 726 return err;
529 727
530 priv = netdev_priv(dev); 728 priv = netdev_priv(dev);
531 priv->ndev = dev; 729 priv->ndev = dev;
@@ -533,23 +731,6 @@ static int gfar_probe(struct of_device *ofdev,
533 priv->node = ofdev->node; 731 priv->node = ofdev->node;
534 SET_NETDEV_DEV(dev, &ofdev->dev); 732 SET_NETDEV_DEV(dev, &ofdev->dev);
535 733
536 err = gfar_of_init(dev);
537
538 if (err)
539 goto regs_fail;
540
541 priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
542 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
543 if (!priv->tx_queue)
544 goto regs_fail;
545
546 priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
547 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
548 if (!priv->rx_queue)
549 goto rx_queue_fail;
550
551 spin_lock_init(&priv->tx_queue->txlock);
552 spin_lock_init(&priv->rx_queue->rxlock);
553 spin_lock_init(&priv->gfargrp.grplock); 734 spin_lock_init(&priv->gfargrp.grplock);
554 spin_lock_init(&priv->bflock); 735 spin_lock_init(&priv->bflock);
555 INIT_WORK(&priv->reset_task, gfar_reset_task); 736 INIT_WORK(&priv->reset_task, gfar_reset_task);
@@ -587,8 +768,8 @@ static int gfar_probe(struct of_device *ofdev,
587 dev->netdev_ops = &gfar_netdev_ops; 768 dev->netdev_ops = &gfar_netdev_ops;
588 dev->ethtool_ops = &gfar_ethtool_ops; 769 dev->ethtool_ops = &gfar_ethtool_ops;
589 770
590 /* Register for napi ...NAPI is for each rx_queue */ 771 /* Register for napi ...We are registering NAPI for each grp */
591 netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT); 772 netif_napi_add(dev, &priv->gfargrp.napi, gfar_poll, GFAR_DEV_WEIGHT);
592 773
593 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 774 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
594 priv->rx_csum_enable = 1; 775 priv->rx_csum_enable = 1;
@@ -644,17 +825,44 @@ static int gfar_probe(struct of_device *ofdev,
644 if (dev->features & NETIF_F_IP_CSUM) 825 if (dev->features & NETIF_F_IP_CSUM)
645 dev->hard_header_len += GMAC_FCB_LEN; 826 dev->hard_header_len += GMAC_FCB_LEN;
646 827
828 /* Need to reverse the bit maps as bit_map's MSB is q0
829 * but, for_each_bit parses from right to left, which
830 * basically reverses the queue numbers */
831 priv->gfargrp.tx_bit_map = reverse_bitmap(priv->gfargrp.tx_bit_map, MAX_TX_QS);
832 priv->gfargrp.rx_bit_map = reverse_bitmap(priv->gfargrp.rx_bit_map, MAX_RX_QS);
833
834 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values */
835 for_each_bit(i, &priv->gfargrp.rx_bit_map, priv->num_rx_queues) {
836 priv->gfargrp.num_rx_queues++;
837 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
838 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
839 }
840 for_each_bit (i, &priv->gfargrp.tx_bit_map, priv->num_tx_queues) {
841 priv->gfargrp.num_tx_queues++;
842 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
843 tqueue = tqueue | (TQUEUE_EN0 >> i);
844 }
845 priv->gfargrp.rstat = rstat;
846 priv->gfargrp.tstat = tstat;
847
848 gfar_write(&regs->rqueue, rqueue);
849 gfar_write(&regs->tqueue, tqueue);
850
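
The loops above derive each per-queue enable/clear bit by shifting the queue-0 constant right by the queue index. A stand-alone sketch using the constants this patch adds to gianfar.h (queue counts illustrative; the RSTAT value is omitted because its per-queue constant is not shown in these hunks):

#include <stdio.h>

#define TSTAT_CLEAR_THALT	0x80000000u	/* queue 0 */
#define TQUEUE_EN0		0x00008000u
#define RQUEUE_EN0		0x00000080u
#define RQUEUE_EX0		0x00800000u

int main(void)
{
	unsigned int tstat = 0, tqueue = 0, rqueue = 0;
	int i, num_tx = 4, num_rx = 4;

	for (i = 0; i < num_tx; i++) {
		tstat  |= TSTAT_CLEAR_THALT >> i;	/* THALT0..THALT3 */
		tqueue |= TQUEUE_EN0 >> i;		/* EN0..EN3 */
	}
	for (i = 0; i < num_rx; i++)
		rqueue |= (RQUEUE_EN0 | RQUEUE_EX0) >> i;

	/* prints tstat=0xF0000000 tqueue=0x0000F000 rqueue=0x00F000F0 */
	printf("tstat=0x%08X tqueue=0x%08X rqueue=0x%08X\n", tstat, tqueue, rqueue);
	return 0;
}
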
647 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 851 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
648 852
649 /* Initializing some of the rx/tx queue level parameters */ 853 /* Initializing some of the rx/tx queue level parameters */
650 priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE; 854 for (i = 0; i < priv->num_tx_queues; i++) {
651 priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE; 855 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
652 priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE; 856 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
653 priv->tx_queue->txic = DEFAULT_TXIC; 857 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
858 priv->tx_queue[i]->txic = DEFAULT_TXIC;
859 }
654 860
655 priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE; 861 for (i = 0; i < priv->num_rx_queues; i++) {
656 priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE; 862 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
657 priv->rx_queue->rxic = DEFAULT_RXIC; 863 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
864 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
865 }
658 866
659 /* Enable most messages by default */ 867 /* Enable most messages by default */
660 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 868 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -699,17 +907,19 @@ static int gfar_probe(struct of_device *ofdev,
699 /* Even more device info helps when determining which kernel */ 907 /* Even more device info helps when determining which kernel */
700 /* provided which set of benchmarks. */ 908 /* provided which set of benchmarks. */
701 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 909 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
702 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 910 for (i = 0; i < priv->num_rx_queues; i++)
703 dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size); 911 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
912 dev->name, i, priv->rx_queue[i]->rx_ring_size);
913 for(i = 0; i < priv->num_tx_queues; i++)
914 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
915 dev->name, i, priv->tx_queue[i]->tx_ring_size);
704 916
705 return 0; 917 return 0;
706 918
707register_fail: 919register_fail:
708 iounmap(priv->gfargrp.regs); 920 iounmap(priv->gfargrp.regs);
709 kfree(priv->rx_queue); 921 free_tx_pointers(priv);
710rx_queue_fail: 922 free_rx_pointers(priv);
711 kfree(priv->tx_queue);
712regs_fail:
713 if (priv->phy_node) 923 if (priv->phy_node)
714 of_node_put(priv->phy_node); 924 of_node_put(priv->phy_node);
715 if (priv->tbi_node) 925 if (priv->tbi_node)
@@ -742,8 +952,6 @@ static int gfar_suspend(struct device *dev)
742{ 952{
743 struct gfar_private *priv = dev_get_drvdata(dev); 953 struct gfar_private *priv = dev_get_drvdata(dev);
744 struct net_device *ndev = priv->ndev; 954 struct net_device *ndev = priv->ndev;
745 struct gfar_priv_tx_q *tx_queue = NULL;
746 struct gfar_priv_rx_q *rx_queue = NULL;
747 struct gfar __iomem *regs = NULL; 955 struct gfar __iomem *regs = NULL;
748 unsigned long flags; 956 unsigned long flags;
749 u32 tempval; 957 u32 tempval;
@@ -752,13 +960,13 @@ static int gfar_suspend(struct device *dev)
752 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 960 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
753 961
754 netif_device_detach(ndev); 962 netif_device_detach(ndev);
755 tx_queue = priv->tx_queue;
756 rx_queue = priv->rx_queue;
757 regs = priv->gfargrp.regs; 963 regs = priv->gfargrp.regs;
758 964
759 if (netif_running(ndev)) { 965 if (netif_running(ndev)) {
760 spin_lock_irqsave(&tx_queue->txlock, flags); 966
761 spin_lock(&rx_queue->rxlock); 967 local_irq_save(flags);
968 lock_tx_qs(priv);
969 lock_rx_qs(priv);
762 970
763 gfar_halt_nodisable(ndev); 971 gfar_halt_nodisable(ndev);
764 972
@@ -772,10 +980,11 @@ static int gfar_suspend(struct device *dev)
772 980
773 gfar_write(&regs->maccfg1, tempval); 981 gfar_write(&regs->maccfg1, tempval);
774 982
775 spin_unlock(&rx_queue->rxlock); 983 unlock_rx_qs(priv);
776 spin_unlock_irqrestore(&tx_queue->txlock, flags); 984 unlock_tx_qs(priv);
985 local_irq_restore(flags);
777 986
778 napi_disable(&rx_queue->napi); 987 napi_disable(&priv->gfargrp.napi);
779 988
780 if (magic_packet) { 989 if (magic_packet) {
781 /* Enable interrupt on Magic Packet */ 990 /* Enable interrupt on Magic Packet */
@@ -797,8 +1006,6 @@ static int gfar_resume(struct device *dev)
797{ 1006{
798 struct gfar_private *priv = dev_get_drvdata(dev); 1007 struct gfar_private *priv = dev_get_drvdata(dev);
799 struct net_device *ndev = priv->ndev; 1008 struct net_device *ndev = priv->ndev;
800 struct gfar_priv_tx_q *tx_queue = NULL;
801 struct gfar_priv_rx_q *rx_queue = NULL;
802 struct gfar __iomem *regs = NULL; 1009 struct gfar __iomem *regs = NULL;
803 unsigned long flags; 1010 unsigned long flags;
804 u32 tempval; 1011 u32 tempval;
@@ -816,12 +1023,11 @@ static int gfar_resume(struct device *dev)
816 /* Disable Magic Packet mode, in case something 1023 /* Disable Magic Packet mode, in case something
817 * else woke us up. 1024 * else woke us up.
818 */ 1025 */
819 rx_queue = priv->rx_queue;
820 tx_queue = priv->tx_queue;
821 regs = priv->gfargrp.regs; 1026 regs = priv->gfargrp.regs;
822 1027
823 spin_lock_irqsave(&tx_queue->txlock, flags); 1028 local_irq_save(flags);
824 spin_lock(&rx_queue->rxlock); 1029 lock_tx_qs(priv);
1030 lock_rx_qs(priv);
825 1031
826 tempval = gfar_read(&regs->maccfg2); 1032 tempval = gfar_read(&regs->maccfg2);
827 tempval &= ~MACCFG2_MPEN; 1033 tempval &= ~MACCFG2_MPEN;
@@ -829,12 +1035,13 @@ static int gfar_resume(struct device *dev)
829 1035
830 gfar_start(ndev); 1036 gfar_start(ndev);
831 1037
832 spin_unlock(&rx_queue->rxlock); 1038 unlock_rx_qs(priv);
833 spin_unlock_irqrestore(&tx_queue->txlock, flags); 1039 unlock_tx_qs(priv);
1040 local_irq_restore(flags);
834 1041
835 netif_device_attach(ndev); 1042 netif_device_attach(ndev);
836 1043
837 napi_enable(&rx_queue->napi); 1044 napi_enable(&priv->gfargrp.napi);
838 1045
839 return 0; 1046 return 0;
840} 1047}
@@ -861,7 +1068,7 @@ static int gfar_restore(struct device *dev)
861 phy_start(priv->phydev); 1068 phy_start(priv->phydev);
862 1069
863 netif_device_attach(ndev); 1070 netif_device_attach(ndev);
864 napi_enable(&priv->napi); 1071 napi_enable(&priv->gfargrp.napi);
865 1072
866 return 0; 1073 return 0;
867} 1074}
@@ -1115,23 +1322,21 @@ void gfar_halt(struct net_device *dev)
1115void stop_gfar(struct net_device *dev) 1322void stop_gfar(struct net_device *dev)
1116{ 1323{
1117 struct gfar_private *priv = netdev_priv(dev); 1324 struct gfar_private *priv = netdev_priv(dev);
1118 struct gfar_priv_tx_q *tx_queue = NULL;
1119 struct gfar_priv_rx_q *rx_queue = NULL;
1120 unsigned long flags; 1325 unsigned long flags;
1121 1326
1122 phy_stop(priv->phydev); 1327 phy_stop(priv->phydev);
1123 1328
1124 tx_queue = priv->tx_queue;
1125 rx_queue = priv->rx_queue;
1126 1329
1127 /* Lock it down */ 1330 /* Lock it down */
1128 spin_lock_irqsave(&tx_queue->txlock, flags); 1331 local_irq_save(flags);
1129 spin_lock(&rx_queue->rxlock); 1332 lock_tx_qs(priv);
1333 lock_rx_qs(priv);
1130 1334
1131 gfar_halt(dev); 1335 gfar_halt(dev);
1132 1336
1133 spin_unlock(&rx_queue->rxlock); 1337 unlock_rx_qs(priv);
1134 spin_unlock_irqrestore(&tx_queue->txlock, flags); 1338 unlock_tx_qs(priv);
1339 local_irq_restore(flags);
1135 1340
1136 /* Free the IRQs */ 1341 /* Free the IRQs */
1137 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1342 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -1145,24 +1350,14 @@ void stop_gfar(struct net_device *dev)
1145 free_skb_resources(priv); 1350 free_skb_resources(priv);
1146} 1351}
1147 1352
1148/* If there are any tx skbs or rx skbs still around, free them. 1353static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1149 * Then free tx_skbuff and rx_skbuff */
1150static void free_skb_resources(struct gfar_private *priv)
1151{ 1354{
1152 struct device *dev = &priv->ofdev->dev;
1153 struct rxbd8 *rxbdp;
1154 struct txbd8 *txbdp; 1355 struct txbd8 *txbdp;
1155 struct gfar_priv_tx_q *tx_queue = NULL; 1356 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1156 struct gfar_priv_rx_q *rx_queue = NULL;
1157 int i, j; 1357 int i, j;
1158 1358
1159 /* Go through all the buffer descriptors and free their data buffers */
1160 tx_queue = priv->tx_queue;
1161 txbdp = tx_queue->tx_bd_base; 1359 txbdp = tx_queue->tx_bd_base;
1162 1360
1163 if (!tx_queue->tx_skbuff)
1164 goto skip_tx_skbuff;
1165
1166 for (i = 0; i < tx_queue->tx_ring_size; i++) { 1361 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1167 if (!tx_queue->tx_skbuff[i]) 1362 if (!tx_queue->tx_skbuff[i])
1168 continue; 1363 continue;
@@ -1170,7 +1365,8 @@ static void free_skb_resources(struct gfar_private *priv)
1170 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1365 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1171 txbdp->length, DMA_TO_DEVICE); 1366 txbdp->length, DMA_TO_DEVICE);
1172 txbdp->lstatus = 0; 1367 txbdp->lstatus = 0;
1173 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { 1368 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1369 j++) {
1174 txbdp++; 1370 txbdp++;
1175 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1371 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1176 txbdp->length, DMA_TO_DEVICE); 1372 txbdp->length, DMA_TO_DEVICE);
@@ -1179,36 +1375,58 @@ static void free_skb_resources(struct gfar_private *priv)
1179 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 1375 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1180 tx_queue->tx_skbuff[i] = NULL; 1376 tx_queue->tx_skbuff[i] = NULL;
1181 } 1377 }
1182
1183 kfree(tx_queue->tx_skbuff); 1378 kfree(tx_queue->tx_skbuff);
1184skip_tx_skbuff: 1379}
1185 1380
1186 rx_queue = priv->rx_queue; 1381static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1187 rxbdp = rx_queue->rx_bd_base; 1382{
1383 struct rxbd8 *rxbdp;
1384 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1385 int i;
1188 1386
1189 if (!rx_queue->rx_skbuff) 1387 rxbdp = rx_queue->rx_bd_base;
1190 goto skip_rx_skbuff;
1191 1388
1192 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1389 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1193 if (rx_queue->rx_skbuff[i]) { 1390 if (rx_queue->rx_skbuff[i]) {
1194 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, 1391 dma_unmap_single(&priv->ofdev->dev,
1195 priv->rx_buffer_size, 1392 rxbdp->bufPtr, priv->rx_buffer_size,
1196 DMA_FROM_DEVICE); 1393 DMA_FROM_DEVICE);
1197 dev_kfree_skb_any(rx_queue->rx_skbuff[i]); 1394 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1198 rx_queue->rx_skbuff[i] = NULL; 1395 rx_queue->rx_skbuff[i] = NULL;
1199 } 1396 }
1200
1201 rxbdp->lstatus = 0; 1397 rxbdp->lstatus = 0;
1202 rxbdp->bufPtr = 0; 1398 rxbdp->bufPtr = 0;
1203 rxbdp++; 1399 rxbdp++;
1204 } 1400 }
1205
1206 kfree(rx_queue->rx_skbuff); 1401 kfree(rx_queue->rx_skbuff);
1207skip_rx_skbuff: 1402}
1208 1403
1209 dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size + 1404/* If there are any tx skbs or rx skbs still around, free them.
1210 sizeof(*rxbdp) * rx_queue->rx_ring_size, 1405 * Then free tx_skbuff and rx_skbuff */
1211 tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base); 1406static void free_skb_resources(struct gfar_private *priv)
1407{
1408 struct gfar_priv_tx_q *tx_queue = NULL;
1409 struct gfar_priv_rx_q *rx_queue = NULL;
1410 int i;
1411
1412 /* Go through all the buffer descriptors and free their data buffers */
1413 for (i = 0; i < priv->num_tx_queues; i++) {
1414 tx_queue = priv->tx_queue[i];
1415 if (tx_queue->tx_skbuff)
1416 free_skb_tx_queue(tx_queue);
1417 }
1418
1419 for (i = 0; i < priv->num_rx_queues; i++) {
1420 rx_queue = priv->rx_queue[i];
1421 if (rx_queue->rx_skbuff)
1422 free_skb_rx_queue(rx_queue);
1423 }
1424
1425 dma_free_coherent(&priv->ofdev->dev,
1426 sizeof(struct txbd8) * priv->total_tx_ring_size +
1427 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1428 priv->tx_queue[0]->tx_bd_base,
1429 priv->tx_queue[0]->tx_bd_dma_base);
1212} 1430}
1213 1431
1214void gfar_start(struct net_device *dev) 1432void gfar_start(struct net_device *dev)
@@ -1233,8 +1451,8 @@ void gfar_start(struct net_device *dev)
1233 gfar_write(&regs->dmactrl, tempval); 1451 gfar_write(&regs->dmactrl, tempval);
1234 1452
1235 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1453 /* Clear THLT/RHLT, so that the DMA starts polling now */
1236 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1454 gfar_write(&regs->tstat, priv->gfargrp.tstat);
1237 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1455 gfar_write(&regs->rstat, priv->gfargrp.rstat);
1238 1456
1239 /* Unmask the interrupts we look for */ 1457 /* Unmask the interrupts we look for */
1240 gfar_write(&regs->imask, IMASK_DEFAULT); 1458 gfar_write(&regs->imask, IMASK_DEFAULT);
@@ -1329,7 +1547,7 @@ static int gfar_enet_open(struct net_device *dev)
1329 struct gfar_private *priv = netdev_priv(dev); 1547 struct gfar_private *priv = netdev_priv(dev);
1330 int err; 1548 int err;
1331 1549
1332 napi_enable(&priv->rx_queue->napi); 1550 napi_enable(&priv->gfargrp.napi);
1333 1551
1334 skb_queue_head_init(&priv->rx_recycle); 1552 skb_queue_head_init(&priv->rx_recycle);
1335 1553
@@ -1341,17 +1559,17 @@ static int gfar_enet_open(struct net_device *dev)
1341 err = init_phy(dev); 1559 err = init_phy(dev);
1342 1560
1343 if (err) { 1561 if (err) {
1344 napi_disable(&priv->rx_queue->napi); 1562 napi_disable(&priv->gfargrp.napi);
1345 return err; 1563 return err;
1346 } 1564 }
1347 1565
1348 err = startup_gfar(dev); 1566 err = startup_gfar(dev);
1349 if (err) { 1567 if (err) {
1350 napi_disable(&priv->rx_queue->napi); 1568 napi_disable(&priv->gfargrp.napi);
1351 return err; 1569 return err;
1352 } 1570 }
1353 1571
1354 netif_start_queue(dev); 1572 netif_tx_start_all_queues(dev);
1355 1573
1356 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1574 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1357 1575
@@ -1421,16 +1639,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1421{ 1639{
1422 struct gfar_private *priv = netdev_priv(dev); 1640 struct gfar_private *priv = netdev_priv(dev);
1423 struct gfar_priv_tx_q *tx_queue = NULL; 1641 struct gfar_priv_tx_q *tx_queue = NULL;
1642 struct netdev_queue *txq;
1424 struct gfar __iomem *regs = NULL; 1643 struct gfar __iomem *regs = NULL;
1425 struct txfcb *fcb = NULL; 1644 struct txfcb *fcb = NULL;
1426 struct txbd8 *txbdp, *txbdp_start, *base; 1645 struct txbd8 *txbdp, *txbdp_start, *base;
1427 u32 lstatus; 1646 u32 lstatus;
1428 int i; 1647 int i, rq = 0;
1429 u32 bufaddr; 1648 u32 bufaddr;
1430 unsigned long flags; 1649 unsigned long flags;
1431 unsigned int nr_frags, length; 1650 unsigned int nr_frags, length;
1432 1651
1433 tx_queue = priv->tx_queue; 1652
1653 rq = skb->queue_mapping;
1654 tx_queue = priv->tx_queue[rq];
1655 txq = netdev_get_tx_queue(dev, rq);
1434 base = tx_queue->tx_bd_base; 1656 base = tx_queue->tx_bd_base;
1435 regs = priv->gfargrp.regs; 1657 regs = priv->gfargrp.regs;
1436 1658
@@ -1458,7 +1680,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1458 /* check if there is space to queue this packet */ 1680 /* check if there is space to queue this packet */
1459 if ((nr_frags+1) > tx_queue->num_txbdfree) { 1681 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1460 /* no space, stop the queue */ 1682 /* no space, stop the queue */
1461 netif_stop_queue(dev); 1683 netif_tx_stop_queue(txq);
1462 dev->stats.tx_fifo_errors++; 1684 dev->stats.tx_fifo_errors++;
1463 spin_unlock_irqrestore(&tx_queue->txlock, flags); 1685 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1464 return NETDEV_TX_BUSY; 1686 return NETDEV_TX_BUSY;
@@ -1550,13 +1772,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1550 /* If the next BD still needs to be cleaned up, then the bds 1772 /* If the next BD still needs to be cleaned up, then the bds
1551 are full. We need to tell the kernel to stop sending us stuff. */ 1773 are full. We need to tell the kernel to stop sending us stuff. */
1552 if (!tx_queue->num_txbdfree) { 1774 if (!tx_queue->num_txbdfree) {
1553 netif_stop_queue(dev); 1775 netif_tx_stop_queue(txq);
1554 1776
1555 dev->stats.tx_fifo_errors++; 1777 dev->stats.tx_fifo_errors++;
1556 } 1778 }
1557 1779
1558 /* Tell the DMA to go go go */ 1780 /* Tell the DMA to go go go */
1559 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1781 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1560 1782
1561 /* Unlock priv */ 1783 /* Unlock priv */
1562 spin_unlock_irqrestore(&tx_queue->txlock, flags); 1784 spin_unlock_irqrestore(&tx_queue->txlock, flags);
@@ -1569,7 +1791,7 @@ static int gfar_close(struct net_device *dev)
1569{ 1791{
1570 struct gfar_private *priv = netdev_priv(dev); 1792 struct gfar_private *priv = netdev_priv(dev);
1571 1793
1572 napi_disable(&priv->rx_queue->napi); 1794 napi_disable(&priv->gfargrp.napi);
1573 1795
1574 skb_queue_purge(&priv->rx_recycle); 1796 skb_queue_purge(&priv->rx_recycle);
1575 cancel_work_sync(&priv->reset_task); 1797 cancel_work_sync(&priv->reset_task);
@@ -1579,7 +1801,7 @@ static int gfar_close(struct net_device *dev)
1579 phy_disconnect(priv->phydev); 1801 phy_disconnect(priv->phydev);
1580 priv->phydev = NULL; 1802 priv->phydev = NULL;
1581 1803
1582 netif_stop_queue(dev); 1804 netif_tx_stop_all_queues(dev);
1583 1805
1584 return 0; 1806 return 0;
1585} 1807}
@@ -1598,14 +1820,13 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1598 struct vlan_group *grp) 1820 struct vlan_group *grp)
1599{ 1821{
1600 struct gfar_private *priv = netdev_priv(dev); 1822 struct gfar_private *priv = netdev_priv(dev);
1601 struct gfar_priv_rx_q *rx_queue = NULL;
1602 struct gfar __iomem *regs = NULL; 1823 struct gfar __iomem *regs = NULL;
1603 unsigned long flags; 1824 unsigned long flags;
1604 u32 tempval; 1825 u32 tempval;
1605 1826
1606 rx_queue = priv->rx_queue;
1607 regs = priv->gfargrp.regs; 1827 regs = priv->gfargrp.regs;
1608 spin_lock_irqsave(&rx_queue->rxlock, flags); 1828 local_irq_save(flags);
1829 lock_rx_qs(priv);
1609 1830
1610 priv->vlgrp = grp; 1831 priv->vlgrp = grp;
1611 1832
@@ -1639,7 +1860,8 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1639 1860
1640 gfar_change_mtu(dev, dev->mtu); 1861 gfar_change_mtu(dev, dev->mtu);
1641 1862
1642 spin_unlock_irqrestore(&rx_queue->rxlock, flags); 1863 unlock_rx_qs(priv);
1864 local_irq_restore(flags);
1643} 1865}
1644 1866
1645static int gfar_change_mtu(struct net_device *dev, int new_mtu) 1867static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -1711,10 +1933,10 @@ static void gfar_reset_task(struct work_struct *work)
1711 struct net_device *dev = priv->ndev; 1933 struct net_device *dev = priv->ndev;
1712 1934
1713 if (dev->flags & IFF_UP) { 1935 if (dev->flags & IFF_UP) {
1714 netif_stop_queue(dev); 1936 netif_tx_stop_all_queues(dev);
1715 stop_gfar(dev); 1937 stop_gfar(dev);
1716 startup_gfar(dev); 1938 startup_gfar(dev);
1717 netif_start_queue(dev); 1939 netif_tx_start_all_queues(dev);
1718 } 1940 }
1719 1941
1720 netif_tx_schedule_all(dev); 1942 netif_tx_schedule_all(dev);
@@ -1745,7 +1967,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1745 int howmany = 0; 1967 int howmany = 0;
1746 u32 lstatus; 1968 u32 lstatus;
1747 1969
1748 rx_queue = priv->rx_queue; 1970 rx_queue = priv->rx_queue[tx_queue->qindex];
1749 bdp = tx_queue->dirty_tx; 1971 bdp = tx_queue->dirty_tx;
1750 skb_dirtytx = tx_queue->skb_dirtytx; 1972 skb_dirtytx = tx_queue->skb_dirtytx;
1751 1973
@@ -1798,8 +2020,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1798 } 2020 }
1799 2021
1800 /* If we freed a buffer, we can restart transmission, if necessary */ 2022 /* If we freed a buffer, we can restart transmission, if necessary */
1801 if (netif_queue_stopped(dev) && tx_queue->num_txbdfree) 2023 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1802 netif_wake_queue(dev); 2024 netif_wake_subqueue(dev, tx_queue->qindex);
1803 2025
1804 /* Update dirty indicators */ 2026 /* Update dirty indicators */
1805 tx_queue->skb_dirtytx = skb_dirtytx; 2027 tx_queue->skb_dirtytx = skb_dirtytx;
@@ -1812,19 +2034,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1812 2034
1813static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2035static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1814{ 2036{
1815 struct gfar_private *priv = gfargrp->priv;
1816 struct gfar_priv_tx_q *tx_queue = NULL;
1817 struct gfar_priv_rx_q *rx_queue = NULL;
1818 unsigned long flags; 2037 unsigned long flags;
1819 2038
1820 rx_queue = priv->rx_queue; 2039 spin_lock_irqsave(&gfargrp->grplock, flags);
1821 tx_queue = priv->tx_queue; 2040 if (napi_schedule_prep(&gfargrp->napi)) {
1822 spin_lock_irqsave(&tx_queue->txlock, flags);
1823 spin_lock(&rx_queue->rxlock);
1824
1825 if (napi_schedule_prep(&rx_queue->napi)) {
1826 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2041 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1827 __napi_schedule(&rx_queue->napi); 2042 __napi_schedule(&gfargrp->napi);
1828 } else { 2043 } else {
1829 /* 2044 /*
1830 * Clear IEVENT, so interrupts aren't called again 2045 * Clear IEVENT, so interrupts aren't called again
@@ -1832,9 +2047,8 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1832 */ 2047 */
1833 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2048 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1834 } 2049 }
2050 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1835 2051
1836 spin_unlock(&rx_queue->rxlock);
1837 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1838} 2052}
1839 2053
1840/* Interrupt Handler for Transmit complete */ 2054/* Interrupt Handler for Transmit complete */
@@ -1952,6 +2166,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1952 fcb = (struct rxfcb *)skb->data; 2166 fcb = (struct rxfcb *)skb->data;
1953 2167
1954 /* Remove the FCB from the skb */ 2168 /* Remove the FCB from the skb */
2169 skb_set_queue_mapping(skb, fcb->rq);
1955 /* Remove the padded bytes, if there are any */ 2170 /* Remove the padded bytes, if there are any */
1956 if (amount_pull) 2171 if (amount_pull)
1957 skb_pull(skb, amount_pull); 2172 skb_pull(skb, amount_pull);
@@ -2072,28 +2287,54 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2072 2287
2073static int gfar_poll(struct napi_struct *napi, int budget) 2288static int gfar_poll(struct napi_struct *napi, int budget)
2074{ 2289{
2075 struct gfar_priv_rx_q *rx_queue = container_of(napi, 2290 struct gfar_priv_grp *gfargrp = container_of(napi,
2076 struct gfar_priv_rx_q, napi); 2291 struct gfar_priv_grp, napi);
2077 struct net_device *dev = rx_queue->dev; 2292 struct gfar_private *priv = gfargrp->priv;
2078 struct gfar_private *priv = netdev_priv(dev);
2079 struct gfar __iomem *regs = priv->gfargrp.regs; 2293 struct gfar __iomem *regs = priv->gfargrp.regs;
2080 struct gfar_priv_tx_q *tx_queue = NULL; 2294 struct gfar_priv_tx_q *tx_queue = NULL;
2081 int tx_cleaned = 0; 2295 struct gfar_priv_rx_q *rx_queue = NULL;
2082 int rx_cleaned = 0; 2296 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2297 int tx_cleaned = 0, i, left_over_budget = budget, serviced_queues = 0;
2298 int num_queues = 0;
2083 unsigned long flags; 2299 unsigned long flags;
2084 2300
2301 num_queues = gfargrp->num_rx_queues;
2302 budget_per_queue = budget/num_queues;
2303
2085 /* Clear IEVENT, so interrupts aren't called again 2304 /* Clear IEVENT, so interrupts aren't called again
2086 * because of the packets that have already arrived */ 2305 * because of the packets that have already arrived */
2087 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2306 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2088 tx_queue = priv->tx_queue;
2089 2307
2090 /* If we fail to get the lock, don't bother with the TX BDs */ 2308 while (num_queues && left_over_budget) {
2091 if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2092 tx_cleaned = gfar_clean_tx_ring(tx_queue);
2093 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2094 }
2095 2309
2096 rx_cleaned = gfar_clean_rx_ring(rx_queue, budget); 2310 budget_per_queue = left_over_budget/num_queues;
2311 left_over_budget = 0;
2312
2313 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2314 if (test_bit(i, &serviced_queues))
2315 continue;
2316 rx_queue = priv->rx_queue[i];
2317 tx_queue = priv->tx_queue[rx_queue->qindex];
2318
2319 /* If we fail to get the lock,
2320 * don't bother with the TX BDs */
2321 if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2322 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2323 spin_unlock_irqrestore(&tx_queue->txlock,
2324 flags);
2325 }
2326
2327 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2328 budget_per_queue);
2329 rx_cleaned += rx_cleaned_per_queue;
2330 if(rx_cleaned_per_queue < budget_per_queue) {
2331 left_over_budget = left_over_budget +
2332 (budget_per_queue - rx_cleaned_per_queue);
2333 set_bit(i, &serviced_queues);
2334 num_queues--;
2335 }
2336 }
2337 }
2097 2338
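
A stand-alone sketch of the budget accounting in this loop (the per-queue work figures are made up): the NAPI budget is split evenly over the group's RX queues, and whatever an idle queue leaves unused is pooled and handed back to the still-busy queues on the next pass.

#include <stdio.h>

#define NQ 4

int main(void)
{
	int work[NQ] = { 3, 40, 1, 2 };		/* packets waiting per queue */
	int cleaned[NQ] = { 0 };
	int serviced[NQ] = { 0 };
	int budget = 32, num_queues = NQ, left_over = budget;
	int i;

	while (num_queues && left_over) {
		int per_queue = left_over / num_queues;

		left_over = 0;
		for (i = 0; i < NQ; i++) {
			int done;

			if (serviced[i])
				continue;
			done = work[i] < per_queue ? work[i] : per_queue;
			work[i] -= done;
			cleaned[i] += done;
			if (done < per_queue) {
				/* queue went idle: return the unused budget */
				left_over += per_queue - done;
				serviced[i] = 1;
				num_queues--;
			}
		}
	}

	/* the per-queue totals add up to the original budget of 32 */
	for (i = 0; i < NQ; i++)
		printf("queue %d cleaned %d\n", i, cleaned[i]);
	return 0;
}
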
2098 if (tx_cleaned) 2339 if (tx_cleaned)
2099 return budget; 2340 return budget;
@@ -2102,7 +2343,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2102 napi_complete(napi); 2343 napi_complete(napi);
2103 2344
2104 /* Clear the halt bit in RSTAT */ 2345 /* Clear the halt bit in RSTAT */
2105 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 2346 gfar_write(&regs->rstat, gfargrp->rstat);
2106 2347
2107 gfar_write(&regs->imask, IMASK_DEFAULT); 2348 gfar_write(&regs->imask, IMASK_DEFAULT);
2108 2349
@@ -2180,14 +2421,14 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2180static void adjust_link(struct net_device *dev) 2421static void adjust_link(struct net_device *dev)
2181{ 2422{
2182 struct gfar_private *priv = netdev_priv(dev); 2423 struct gfar_private *priv = netdev_priv(dev);
2183 struct gfar_priv_tx_q *tx_queue = NULL;
2184 struct gfar __iomem *regs = priv->gfargrp.regs; 2424 struct gfar __iomem *regs = priv->gfargrp.regs;
2185 unsigned long flags; 2425 unsigned long flags;
2186 struct phy_device *phydev = priv->phydev; 2426 struct phy_device *phydev = priv->phydev;
2187 int new_state = 0; 2427 int new_state = 0;
2188 2428
2189 tx_queue = priv->tx_queue; 2429 local_irq_save(flags);
2190 spin_lock_irqsave(&tx_queue->txlock, flags); 2430 lock_tx_qs(priv);
2431
2191 if (phydev->link) { 2432 if (phydev->link) {
2192 u32 tempval = gfar_read(&regs->maccfg2); 2433 u32 tempval = gfar_read(&regs->maccfg2);
2193 u32 ecntrl = gfar_read(&regs->ecntrl); 2434 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2252,8 +2493,8 @@ static void adjust_link(struct net_device *dev)
2252 2493
2253 if (new_state && netif_msg_link(priv)) 2494 if (new_state && netif_msg_link(priv))
2254 phy_print_status(phydev); 2495 phy_print_status(phydev);
2255 2496 unlock_tx_qs(priv);
2256 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2497 local_irq_restore(flags);
2257} 2498}
2258 2499
2259/* Update the hash table based on the current list of multicast 2500/* Update the hash table based on the current list of multicast
@@ -2457,7 +2698,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
2457 priv->extra_stats.tx_underrun++; 2698 priv->extra_stats.tx_underrun++;
2458 2699
2459 /* Reactivate the Tx Queues */ 2700 /* Reactivate the Tx Queues */
2460 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 2701 gfar_write(&regs->tstat, gfargrp->tstat);
2461 } 2702 }
2462 if (netif_msg_tx_err(priv)) 2703 if (netif_msg_tx_err(priv))
2463 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2704 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 79e8471584e..5ae769df1d8 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -75,6 +75,10 @@
75extern const char gfar_driver_name[]; 75extern const char gfar_driver_name[];
76extern const char gfar_driver_version[]; 76extern const char gfar_driver_version[];
77 77
78/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
79#define MAX_TX_QS 0x8
80#define MAX_RX_QS 0x8
81
78/* These need to be powers of 2 for this driver */ 82/* These need to be powers of 2 for this driver */
79#define DEFAULT_TX_RING_SIZE 256 83#define DEFAULT_TX_RING_SIZE 256
80#define DEFAULT_RX_RING_SIZE 256 84#define DEFAULT_RX_RING_SIZE 256
@@ -172,12 +176,63 @@ extern const char gfar_driver_version[];
172 176
173#define MINFLR_INIT_SETTINGS 0x00000040 177#define MINFLR_INIT_SETTINGS 0x00000040
174 178
179/* Tqueue control */
180#define TQUEUE_EN0 0x00008000
181#define TQUEUE_EN1 0x00004000
182#define TQUEUE_EN2 0x00002000
183#define TQUEUE_EN3 0x00001000
184#define TQUEUE_EN4 0x00000800
185#define TQUEUE_EN5 0x00000400
186#define TQUEUE_EN6 0x00000200
187#define TQUEUE_EN7 0x00000100
188#define TQUEUE_EN_ALL 0x0000FF00
189
190#define TR03WT_WT0_MASK 0xFF000000
191#define TR03WT_WT1_MASK 0x00FF0000
192#define TR03WT_WT2_MASK 0x0000FF00
193#define TR03WT_WT3_MASK 0x000000FF
194
195#define TR47WT_WT4_MASK 0xFF000000
196#define TR47WT_WT5_MASK 0x00FF0000
197#define TR47WT_WT6_MASK 0x0000FF00
198#define TR47WT_WT7_MASK 0x000000FF
199
200/* Rqueue control */
201#define RQUEUE_EX0 0x00800000
202#define RQUEUE_EX1 0x00400000
203#define RQUEUE_EX2 0x00200000
204#define RQUEUE_EX3 0x00100000
205#define RQUEUE_EX4 0x00080000
206#define RQUEUE_EX5 0x00040000
207#define RQUEUE_EX6 0x00020000
208#define RQUEUE_EX7 0x00010000
209#define RQUEUE_EX_ALL 0x00FF0000
210
211#define RQUEUE_EN0 0x00000080
212#define RQUEUE_EN1 0x00000040
213#define RQUEUE_EN2 0x00000020
214#define RQUEUE_EN3 0x00000010
215#define RQUEUE_EN4 0x00000008
216#define RQUEUE_EN5 0x00000004
217#define RQUEUE_EN6 0x00000002
218#define RQUEUE_EN7 0x00000001
219#define RQUEUE_EN_ALL 0x000000FF
220
175/* Init to do tx snooping for buffers and descriptors */ 221/* Init to do tx snooping for buffers and descriptors */
176#define DMACTRL_INIT_SETTINGS 0x000000c3 222#define DMACTRL_INIT_SETTINGS 0x000000c3
177#define DMACTRL_GRS 0x00000010 223#define DMACTRL_GRS 0x00000010
178#define DMACTRL_GTS 0x00000008 224#define DMACTRL_GTS 0x00000008
179 225
180#define TSTAT_CLEAR_THALT 0x80000000 226#define TSTAT_CLEAR_THALT_ALL 0xFF000000
227#define TSTAT_CLEAR_THALT 0x80000000
228#define TSTAT_CLEAR_THALT0 0x80000000
229#define TSTAT_CLEAR_THALT1 0x40000000
230#define TSTAT_CLEAR_THALT2 0x20000000
231#define TSTAT_CLEAR_THALT3 0x10000000
232#define TSTAT_CLEAR_THALT4 0x08000000
233#define TSTAT_CLEAR_THALT5 0x04000000
234#define TSTAT_CLEAR_THALT6 0x02000000
235#define TSTAT_CLEAR_THALT7 0x01000000
181 236
182/* Interrupt coalescing macros */ 237/* Interrupt coalescing macros */
183#define IC_ICEN 0x80000000 238#define IC_ICEN 0x80000000
@@ -228,6 +283,13 @@ extern const char gfar_driver_version[];
228#define TCTRL_IPCSEN 0x00004000 283#define TCTRL_IPCSEN 0x00004000
229#define TCTRL_TUCSEN 0x00002000 284#define TCTRL_TUCSEN 0x00002000
230#define TCTRL_VLINS 0x00001000 285#define TCTRL_VLINS 0x00001000
286#define TCTRL_THDF 0x00000800
287#define TCTRL_RFCPAUSE 0x00000010
288#define TCTRL_TFCPAUSE 0x00000008
289#define TCTRL_TXSCHED_MASK 0x00000006
290#define TCTRL_TXSCHED_INIT 0x00000000
291#define TCTRL_TXSCHED_PRIO 0x00000002
292#define TCTRL_TXSCHED_WRRS 0x00000004
231#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 293#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
232 294
233#define IEVENT_INIT_CLEAR 0xffffffff 295#define IEVENT_INIT_CLEAR 0xffffffff
@@ -700,6 +762,8 @@ struct gfar {
700#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 762#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
701#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 763#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
702 764
765#define DEFAULT_MAPPING 0xFF
766
703/** 767/**
704 * struct gfar_priv_tx_q - per tx queue structure 768 * struct gfar_priv_tx_q - per tx queue structure
705 * @txlock: per queue tx spin lock 769 * @txlock: per queue tx spin lock
@@ -743,7 +807,6 @@ struct gfar_priv_tx_q {
743/** 807/**
744 * struct gfar_priv_rx_q - per rx queue structure 808 * struct gfar_priv_rx_q - per rx queue structure
745 * @rxlock: per queue rx spin lock 809 * @rxlock: per queue rx spin lock
746 * @napi: the napi poll function
747 * @rx_skbuff: skb pointers 810 * @rx_skbuff: skb pointers
748 * @skb_currx: currently use skb pointer 811 * @skb_currx: currently use skb pointer
749 * @rx_bd_base: First rx buffer descriptor 812 * @rx_bd_base: First rx buffer descriptor
@@ -757,8 +820,8 @@ struct gfar_priv_tx_q {
757 820
758struct gfar_priv_rx_q { 821struct gfar_priv_rx_q {
759 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); 822 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
760 struct napi_struct napi;
761 struct sk_buff ** rx_skbuff; 823 struct sk_buff ** rx_skbuff;
824 dma_addr_t rx_bd_dma_base;
762 struct rxbd8 *rx_bd_base; 825 struct rxbd8 *rx_bd_base;
763 struct rxbd8 *cur_rx; 826 struct rxbd8 *cur_rx;
764 struct net_device *dev; 827 struct net_device *dev;
@@ -772,6 +835,7 @@ struct gfar_priv_rx_q {
772 835
773/** 836/**
774 * struct gfar_priv_grp - per group structure 837 * struct gfar_priv_grp - per group structure
838 * @napi: the napi poll function
775 * @priv: back pointer to the priv structure 839 * @priv: back pointer to the priv structure
776 * @regs: the ioremapped register space for this group 840 * @regs: the ioremapped register space for this group
777 * @grp_id: group id for this group 841 * @grp_id: group id for this group
@@ -785,8 +849,17 @@ struct gfar_priv_rx_q {
 
 struct gfar_priv_grp {
 	spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct napi_struct napi;
 	struct gfar_private *priv;
 	struct gfar __iomem *regs;
+	unsigned int rx_bit_map;
+	unsigned int tx_bit_map;
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
+	unsigned int rstat;
+	unsigned int tstat;
+	unsigned int imask;
+	unsigned int ievent;
 	unsigned int interruptTransmit;
 	unsigned int interruptReceive;
 	unsigned int interruptError;
@@ -807,13 +880,21 @@ struct gfar_priv_grp {
807 */ 880 */
808struct gfar_private { 881struct gfar_private {
809 882
883 /* Indicates how many tx, rx queues are enabled */
884 unsigned int num_tx_queues;
885 unsigned int num_rx_queues;
886
887 /* The total tx and rx ring size for the enabled queues */
888 unsigned int total_tx_ring_size;
889 unsigned int total_rx_ring_size;
890
810 struct device_node *node; 891 struct device_node *node;
811 struct net_device *ndev; 892 struct net_device *ndev;
812 struct of_device *ofdev; 893 struct of_device *ofdev;
813 894
814 struct gfar_priv_grp gfargrp; 895 struct gfar_priv_grp gfargrp;
815 struct gfar_priv_tx_q *tx_queue; 896 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
816 struct gfar_priv_rx_q *rx_queue; 897 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
817 898
818 /* RX per device parameters */ 899 /* RX per device parameters */
819 unsigned int rx_buffer_size; 900 unsigned int rx_buffer_size;
@@ -844,6 +925,7 @@ struct gfar_private {
 	unsigned char rx_csum_enable:1,
 		extended_hash:1,
 		bd_stash_en:1,
+		rx_filer_enable:1,
 		wol_en:1; /* Wake-on-LAN enabled */
 	unsigned short padding;
 
@@ -874,6 +956,10 @@ static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
 	out_be32(addr, val);
 }
 
+extern void lock_rx_qs(struct gfar_private *priv);
+extern void lock_tx_qs(struct gfar_private *priv);
+extern void unlock_rx_qs(struct gfar_private *priv);
+extern void unlock_tx_qs(struct gfar_private *priv);
 extern irqreturn_t gfar_receive(int irq, void *dev_id);
 extern int startup_gfar(struct net_device *dev);
 extern void stop_gfar(struct net_device *dev);
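The four lock_*/unlock_*_qs() helpers are only declared here; their definitions live in gianfar.c. A minimal sketch of the intended semantics, assuming each helper simply walks the enabled queues and takes or releases the per-queue spinlock, with the caller responsible for disabling local interrupts first (as the ethtool and sysfs call sites below do with local_irq_save()/local_irq_restore()):

	/* Sketch of the intended semantics -- the actual definitions are in
	 * gianfar.c, and callers disable local interrupts around these. */
	void lock_tx_qs(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_tx_queues; i++)
			spin_lock(&priv->tx_queue[i]->txlock);
	}

	void unlock_tx_qs(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_tx_queues; i++)
			spin_unlock(&priv->tx_queue[i]->txlock);
	}

	/* lock_rx_qs()/unlock_rx_qs() follow the same pattern over
	 * rx_queue[] and its rxlock. */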
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index c681b414767..d3d26234f19 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -204,9 +204,11 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 	if (NULL == phydev)
 		return -ENODEV;
-	tx_queue = priv->tx_queue;
-	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
+	/* etsec-1.7 and older versions have only one txic
+	 * and rxic regs although they support multiple queues */
 	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
 	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
@@ -298,8 +300,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 	if (NULL == priv->phydev)
 		return -ENODEV;
 
-	rx_queue = priv->rx_queue;
-	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue[0];
+	tx_queue = priv->tx_queue[0];
 
 	rxtime  = get_ictt_value(rx_queue->rxic);
 	rxcount = get_icft_value(rx_queue->rxic);
@@ -357,8 +359,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
-	tx_queue = priv->tx_queue;
-	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
 	/* Set up rx coalescing */
 	if ((cvals->rx_coalesce_usecs == 0) ||
@@ -429,8 +431,8 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 
-	tx_queue = priv->tx_queue;
-	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
 	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -453,9 +455,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 		return -EINVAL;
@@ -475,37 +475,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 		return -EINVAL;
 	}
 
-	tx_queue = priv->tx_queue;
-	rx_queue = priv->rx_queue;
 
 	if (dev->flags & IFF_UP) {
 		unsigned long flags;
 
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&tx_queue->txlock, flags);
-		spin_lock(&rx_queue->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&rx_queue->rxlock);
-		spin_unlock_irqrestore(&tx_queue->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 	}
 
 	/* Change the size */
-	rx_queue->rx_ring_size = rvals->rx_pending;
-	tx_queue->tx_ring_size = rvals->tx_pending;
-	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+	}
 
 	/* Rebuild the rings with the new size */
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -513,29 +517,29 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long flags;
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
-	tx_queue = priv->tx_queue;
-	rx_queue = priv->rx_queue;
 
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&tx_queue->txlock, flags);
-		spin_lock(&rx_queue->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&rx_queue->rxlock);
-		spin_unlock_irqrestore(&tx_queue->txlock, flags);
+		unlock_tx_qs(priv);
+		unlock_rx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
@@ -547,7 +551,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index adea11ea403..4b726f61314 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -51,7 +51,6 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_rx_q *rx_queue = NULL;
 	int new_setting = 0;
 	u32 temp;
 	unsigned long flags;
@@ -59,7 +58,6 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
 		return count;
 
-	rx_queue = priv->rx_queue;
 
 	/* Find out the new setting */
 	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
@@ -70,7 +68,9 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	else
 		return count;
 
-	spin_lock_irqsave(&rx_queue->rxlock, flags);
+
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
@@ -84,7 +84,8 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 
 	gfar_write(&regs->attr, temp);
 
-	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -105,7 +106,6 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -113,9 +113,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	rx_queue = priv->rx_queue;
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
-	spin_lock_irqsave(&rx_queue->rxlock, flags);
 	if (length > priv->rx_buffer_size)
 		goto out;
 
@@ -140,7 +140,8 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	gfar_write(&regs->attr, temp);
 
 out:
-	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -164,7 +165,6 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned short index = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -172,9 +172,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	rx_queue = priv->rx_queue;
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
-	spin_lock_irqsave(&rx_queue->rxlock, flags);
 	if (index > priv->rx_stash_size)
 		goto out;
 
@@ -189,7 +189,8 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	gfar_write(&regs->attreli, flags);
 
 out:
-	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -212,7 +213,6 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -220,9 +220,8 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	tx_queue = priv->tx_queue;
-
-	spin_lock_irqsave(&tx_queue->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_threshold = length;
 
@@ -231,7 +230,8 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	temp |= length;
 	gfar_write(&regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&tx_queue->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -253,7 +253,6 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -261,8 +260,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	tx_queue = priv->tx_queue;
-	spin_lock_irqsave(&tx_queue->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve = num;
 
@@ -271,7 +270,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	temp |= num;
 	gfar_write(&regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&tx_queue->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -294,7 +294,6 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
 	struct gfar __iomem *regs = priv->gfargrp.regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -302,8 +301,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	tx_queue = priv->tx_queue;
-	spin_lock_irqsave(&tx_queue->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve_off = num;
 
@@ -312,7 +311,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	temp |= num;
 	gfar_write(&regs->fifo_tx_starve_shutoff, temp);
 
-	spin_unlock_irqrestore(&tx_queue->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
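Every writer above follows the same multi-queue pattern when it must reconfigure the controller while the interface is up: disable local interrupts, take all TX and RX queue locks, halt the MAC, drop the locks, drain each RX ring, tear the rings down, apply the new setting, then rebuild and wake every TX queue. A condensed sketch of that pattern (illustrative only; gfar_reconfigure() is a hypothetical name, not a real driver entry point):

	/* Illustrative condensation of the pattern used by gfar_sringparam()
	 * and gfar_set_rx_csum() above. */
	static int gfar_reconfigure(struct net_device *dev,
				    void (*apply)(struct gfar_private *))
	{
		struct gfar_private *priv = netdev_priv(dev);
		unsigned long flags;
		int i, err = 0;

		if (dev->flags & IFF_UP) {
			local_irq_save(flags);
			lock_tx_qs(priv);
			lock_rx_qs(priv);

			gfar_halt(dev);			/* stop DMA on all queues */

			unlock_rx_qs(priv);
			unlock_tx_qs(priv);
			local_irq_restore(flags);

			/* process frames already received on every RX queue */
			for (i = 0; i < priv->num_rx_queues; i++)
				gfar_clean_rx_ring(priv->rx_queue[i],
						priv->rx_queue[i]->rx_ring_size);

			stop_gfar(dev);			/* tear down the rings */
		}

		apply(priv);				/* change the setting */

		if (dev->flags & IFF_UP) {
			err = startup_gfar(dev);	/* rebuild the rings */
			netif_tx_wake_all_queues(dev);	/* wake every TX queue */
		}
		return err;
	}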