path: root/drivers/net/gianfar.c
author	Sandeep Gopalpet <Sandeep.Kumar@freescale.com>	2009-11-02 02:03:00 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-03 02:40:55 -0500
commit	a12f801d4b349bc57622584e70e45a4ccbef53b6 (patch)
tree	1b081795127d9e47aa5bac516fededa736dfc394 /drivers/net/gianfar.c
parent	123b43e9716115302a0095e14f2c545811712715 (diff)
gianfar: Add per queue structure support
This patch introduces per-tx-queue and per-rx-queue structures. Earlier, the members of these structures lived inside the gfar_private structure. Moving forward, if we want to support multiple queues, we need to refactor gfar_private so that introducing multiple queues is easier.

Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--	drivers/net/gianfar.c	384
1 file changed, 228 insertions, 156 deletions
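For readers without gianfar.h at hand, here is a rough sketch of what the new per-queue structures contain. It is reconstructed purely from the fields this patch touches in gianfar.c (tx_bd_base, cur_tx, dirty_tx, num_txbdfree, txlock, napi, and so on); the actual definitions live in gianfar.h, and the real member types, ordering, and any additional fields may differ.

/*
 * Illustrative sketch only -- not the definitions added by this patch.
 * Kernel types (spinlock_t, struct napi_struct, dma_addr_t) and the
 * driver's txbd8/rxbd8 descriptor types are assumed to come from the
 * usual headers and from gianfar.h.
 */
struct gfar_priv_tx_q {
	spinlock_t txlock;		/* replaces priv->txlock */
	struct txbd8 *tx_bd_base;	/* first tx buffer descriptor */
	struct txbd8 *cur_tx;		/* next free descriptor */
	struct txbd8 *dirty_tx;		/* first descriptor awaiting cleanup */
	struct sk_buff **tx_skbuff;
	u16 skb_curtx;
	u16 skb_dirtytx;
	unsigned int tx_ring_size;
	unsigned int num_txbdfree;	/* free descriptors remaining */
	unsigned char txcoalescing;
	unsigned long txic;
	dma_addr_t tx_bd_dma_base;
	struct net_device *dev;		/* back-pointer used by gfar_clean_tx_ring() */
};

struct gfar_priv_rx_q {
	spinlock_t rxlock;		/* replaces priv->rxlock */
	struct napi_struct napi;	/* NAPI context now lives with the rx queue */
	struct rxbd8 *rx_bd_base;
	struct rxbd8 *cur_rx;
	struct sk_buff **rx_skbuff;
	u16 skb_currx;
	unsigned int rx_ring_size;
	unsigned char rxcoalescing;
	unsigned long rxic;
	struct net_device *dev;		/* back-pointer used by gfar_clean_rx_ring() */
};

With this layout, gfar_private keeps only tx_queue and rx_queue pointers (allocated in gfar_probe() below), and gfar_poll() can recover its rx queue with container_of(napi, struct gfar_priv_rx_q, napi) because the napi member is embedded in the queue rather than in gfar_private.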
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f7141865869d..354b2b5936ea 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 		int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -147,16 +148,16 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
-static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 			    dma_addr_t buf)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *dev = rx_queue->dev;
 	u32 lstatus;
 
 	bdp->bufPtr = buf;
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
 	eieio();
@@ -167,20 +168,25 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
 	int i;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
+	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
+	rx_queue->cur_rx = rx_queue->rx_bd_base;
+	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
+	rx_queue->skb_currx = 0;
 
 	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
+	txbdp = tx_queue->tx_bd_base;
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
 		txbdp->lstatus = 0;
 		txbdp->bufPtr = 0;
 		txbdp++;
@@ -190,12 +196,12 @@ static int gfar_init_bds(struct net_device *ndev)
 	txbdp--;
 	txbdp->status |= TXBD_WRAP;
 
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = priv->rx_skbuff[i];
+	rxbdp = rx_queue->rx_bd_base;
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct sk_buff *skb = rx_queue->rx_skbuff[i];
 
 		if (skb) {
-			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
+			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
 		} else {
 			skb = gfar_new_skb(ndev);
 			if (!skb) {
@@ -203,9 +209,9 @@ static int gfar_init_bds(struct net_device *ndev)
 					   ndev->name);
 				return -ENOMEM;
 			}
-			priv->rx_skbuff[i] = skb;
+			rx_queue->rx_skbuff[i] = skb;
 
-			gfar_new_rxbdp(ndev, rxbdp, skb);
+			gfar_new_rxbdp(rx_queue, rxbdp, skb);
 		}
 
 		rxbdp++;
@@ -220,12 +226,17 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	int i;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
-			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
-			&priv->tx_bd_dma_base, GFP_KERNEL);
+			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
+			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
+			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,36 +244,38 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
-	priv->tx_bd_base = vaddr;
+	tx_queue->tx_bd_base = vaddr;
+	tx_queue->dev = ndev;
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
-	priv->rx_bd_base = vaddr;
+	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
+	rx_queue->rx_bd_base = vaddr;
+	rx_queue->dev = ndev;
 
 	/* Setup the skbuff rings */
-	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
-				  priv->tx_ring_size, GFP_KERNEL);
-	if (!priv->tx_skbuff) {
+	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				  tx_queue->tx_ring_size, GFP_KERNEL);
+	if (!tx_queue->tx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate tx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < tx_queue->tx_ring_size; i++)
+		tx_queue->tx_skbuff[i] = NULL;
 
-	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
-				  priv->rx_ring_size, GFP_KERNEL);
-	if (!priv->rx_skbuff) {
+	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				  rx_queue->rx_ring_size, GFP_KERNEL);
+	if (!rx_queue->rx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate rx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
+	for (i = 0; i < rx_queue->rx_ring_size; i++)
+		rx_queue->rx_skbuff[i] = NULL;
 
 	if (gfar_init_bds(ndev))
 		goto cleanup;
@@ -278,24 +291,29 @@ static void gfar_init_mac(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	u32 rctrl = 0;
 	u32 tctrl = 0;
 	u32 attrs = 0;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, priv->tx_bd_dma_base);
-	gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
-			sizeof(*priv->tx_bd_base) *
-			priv->tx_ring_size);
+	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
+	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
+			sizeof(*tx_queue->tx_bd_base) *
+			tx_queue->tx_ring_size);
 
 	/* Configure the coalescing support */
 	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
+	if (tx_queue->txcoalescing)
+		gfar_write(&regs->txic, tx_queue->txic);
 
 	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
+	if (rx_queue->rxcoalescing)
+		gfar_write(&regs->rxic, rx_queue->rxic);
 
 	if (priv->rx_csum_enable)
 		rctrl |= RCTRL_CHECKSUMMING;
@@ -414,7 +432,7 @@ static int gfar_of_init(struct net_device *dev)
 
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -519,8 +537,18 @@ static int gfar_probe(struct of_device *ofdev,
 	if (err)
 		goto regs_fail;
 
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
+	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
+				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+	if (!priv->tx_queue)
+		goto regs_fail;
+
+	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
+				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+	if (!priv->rx_queue)
+		goto rx_queue_fail;
+
+	spin_lock_init(&priv->tx_queue->txlock);
+	spin_lock_init(&priv->rx_queue->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
@@ -552,12 +580,13 @@ static int gfar_probe(struct of_device *ofdev,
 
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
-
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register for napi ...NAPI is for each rx_queue */
+	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -613,14 +642,16 @@ static int gfar_probe(struct of_device *ofdev,
 		dev->hard_header_len += GMAC_FCB_LEN;
 
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Initializing some of the rx/tx queue level parameters */
+	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
+	priv->tx_queue->txic = DEFAULT_TXIC;
+
+	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
+	priv->rx_queue->rxic = DEFAULT_RXIC;
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -666,12 +697,15 @@ static int gfar_probe(struct of_device *ofdev,
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	       dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
 
 	return 0;
 
 register_fail:
 	iounmap(priv->regs);
+	kfree(priv->rx_queue);
+rx_queue_fail:
+	kfree(priv->tx_queue);
 regs_fail:
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
@@ -705,6 +739,8 @@ static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
@@ -712,10 +748,12 @@ static int gfar_suspend(struct device *dev)
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	netif_device_detach(ndev);
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	if (netif_running(ndev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt_nodisable(ndev);
 
@@ -729,10 +767,10 @@ static int gfar_suspend(struct device *dev)
 
 		gfar_write(&priv->regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		napi_disable(&priv->napi);
+		napi_disable(&rx_queue->napi);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
@@ -754,6 +792,8 @@ static int gfar_resume(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
@@ -770,9 +810,11 @@ static int gfar_resume(struct device *dev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	tempval = gfar_read(&priv->regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
@@ -780,12 +822,12 @@ static int gfar_resume(struct device *dev)
 
 	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	napi_enable(&rx_queue->napi);
 
 	return 0;
 }
@@ -1060,18 +1102,23 @@ void gfar_halt(struct net_device *dev)
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
 	phy_stop(priv->phydev);
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -1092,46 +1139,50 @@ static void free_skb_resources(struct gfar_private *priv)
 	struct device *dev = &priv->ofdev->dev;
 	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i, j;
 
 	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	txbdp = tx_queue->tx_bd_base;
 
-	if (!priv->tx_skbuff)
+	if (!tx_queue->tx_skbuff)
 		goto skip_tx_skbuff;
 
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 				txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 					txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
 	}
 
-	kfree(priv->tx_skbuff);
+	kfree(tx_queue->tx_skbuff);
 skip_tx_skbuff:
 
-	rxbdp = priv->rx_bd_base;
+	rx_queue = priv->rx_queue;
+	rxbdp = rx_queue->rx_bd_base;
 
-	if (!priv->rx_skbuff)
+	if (!rx_queue->rx_skbuff)
 		goto skip_rx_skbuff;
 
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		if (priv->rx_skbuff[i]) {
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
 			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
 					priv->rx_buffer_size,
 					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-			priv->rx_skbuff[i] = NULL;
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
 		}
 
 		rxbdp->lstatus = 0;
@@ -1139,17 +1190,19 @@ skip_tx_skbuff:
 		rxbdp++;
 	}
 
-	kfree(priv->rx_skbuff);
+	kfree(rx_queue->rx_skbuff);
 skip_rx_skbuff:
 
-	dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-			  sizeof(*rxbdp) * priv->rx_ring_size,
-			  priv->tx_bd_base, priv->tx_bd_dma_base);
+	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
+			  sizeof(*rxbdp) * rx_queue->rx_ring_size,
+			  tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
@@ -1258,7 +1311,7 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	napi_enable(&priv->rx_queue->napi);
 
 	skb_queue_head_init(&priv->rx_recycle);
 
@@ -1269,14 +1322,14 @@ static int gfar_enet_open(struct net_device *dev)
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
@@ -1349,6 +1402,7 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
@@ -1357,7 +1411,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	base = tx_queue->tx_bd_base;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1433,21 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_stop_queue(dev);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1455,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
 
@@ -1442,7 +1497,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
@@ -1462,19 +1517,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full. We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
+	if (!tx_queue->num_txbdfree) {
 		netif_stop_queue(dev);
 
 		dev->stats.tx_fifo_errors++;
@@ -1484,7 +1539,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1494,7 +1549,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	napi_disable(&priv->rx_queue->napi);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1523,10 +1578,12 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 
 	priv->vlgrp = grp;
 
@@ -1560,7 +1617,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -1649,24 +1706,27 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue;
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -1698,29 +1758,29 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += frags + 1;
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
+	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
 		netif_wake_queue(dev);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
@@ -1730,14 +1790,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
-	if (napi_schedule_prep(&priv->napi)) {
+	if (napi_schedule_prep(&rx_queue->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+		__napi_schedule(&rx_queue->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
@@ -1746,8 +1810,8 @@ static void gfar_schedule_cleanup(struct net_device *dev)
 		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -1757,15 +1821,16 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	dma_addr_t buf;
 
 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
 			priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(dev, bdp, buf);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
 
@@ -1890,8 +1955,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  * until the budget/quota has been reached. Returns the number
  * of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1900,8 +1966,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1913,7 +1979,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,30 +2027,33 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-			(priv->skb_currx + 1) &
-			RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+			(rx_queue->skb_currx + 1) &
+			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
+	struct gfar_priv_rx_q *rx_queue = container_of(napi,
+			struct gfar_priv_rx_q, napi);
+	struct net_device *dev = rx_queue->dev;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	int tx_cleaned = 0;
 	int rx_cleaned = 0;
 	unsigned long flags;
@@ -1992,14 +2061,15 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
 	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+	tx_queue = priv->tx_queue;
 
 	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+		tx_cleaned = gfar_clean_tx_ring(tx_queue);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
+	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);
 
 	if (tx_cleaned)
 		return budget;
@@ -2014,13 +2084,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
+		if (likely(rx_queue->rxcoalescing)) {
 			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
+			gfar_write(&priv->regs->rxic, rx_queue->rxic);
 		}
-		if (likely(priv->txcoalescing)) {
+		if (likely(tx_queue->txcoalescing)) {
 			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
+			gfar_write(&priv->regs->txic, tx_queue->txic);
 		}
 	}
 
@@ -2087,12 +2157,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar __iomem *regs = priv->regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2158,7 +2230,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2172,7 +2244,7 @@ static void gfar_set_multi(struct net_device *dev)
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2184,7 +2256,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2308,7 @@ static void gfar_set_multi(struct net_device *dev)
 			em_num = 0;
 		}
 
-		if(dev->mc_count == 0)
+		if (dev->mc_count == 0)
 			return;
 
 		/* Parse the list, and set the appropriate bits */