author     Sandeep Gopalpet <Sandeep.Kumar@freescale.com>   2009-11-02 02:03:00 -0500
committer  David S. Miller <davem@davemloft.net>            2009-11-03 02:40:55 -0500
commit     a12f801d4b349bc57622584e70e45a4ccbef53b6 (patch)
tree       1b081795127d9e47aa5bac516fededa736dfc394
parent     123b43e9716115302a0095e14f2c545811712715 (diff)
gianfar: Add per queue structure support
This patch introduces per-tx and per-rx queue structures. Earlier, the members of these structures lived directly in the gfar_private structure. If we want to support multiple queues going forward, we need to refactor gfar_private so that introducing multiple queues becomes easier.

Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/gianfar.c          384
-rw-r--r--  drivers/net/gianfar.h          116
-rw-r--r--  drivers/net/gianfar_ethtool.c  100
-rw-r--r--  drivers/net/gianfar_sysfs.c     43
4 files changed, 398 insertions(+), 245 deletions(-)
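Before the file-by-file diff, here is the shape of the change condensed into a sketch (field lists are abbreviated and the elisions are marked; the full definitions are in the drivers/net/gianfar.h hunk below, so treat this as a reading aid rather than the authoritative declarations):

    /* Per-queue state that previously sat directly in gfar_private. */
    struct gfar_priv_tx_q {
            spinlock_t txlock;               /* was priv->txlock */
            struct sk_buff **tx_skbuff;
            struct txbd8 *tx_bd_base, *cur_tx, *dirty_tx;
            unsigned int tx_ring_size;
            unsigned int num_txbdfree;
            /* ... skb indices and tx coalescing fields elided ... */
    };

    struct gfar_priv_rx_q {
            spinlock_t rxlock;               /* was priv->rxlock */
            struct napi_struct napi;         /* NAPI moves from priv to the rx queue */
            struct sk_buff **rx_skbuff;
            struct rxbd8 *rx_bd_base, *cur_rx;
            unsigned int rx_ring_size;
            /* ... skb index and rx coalescing fields elided ... */
    };

    struct gfar_private {
            struct gfar_priv_tx_q *tx_queue; /* one queue in this patch; the split */
            struct gfar_priv_rx_q *rx_queue; /* is what makes arrays of queues easy */
            /* ... device-wide fields unchanged ... */
    };

The conversions below are then mechanical: priv->cur_tx becomes priv->tx_queue->cur_tx, locking moves to the queue (for example spin_lock_irqsave(&priv->tx_queue->txlock, flags)), and NAPI is registered against the rx queue via netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT).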
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f7141865869d..354b2b5936ea 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 		int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -147,16 +148,16 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
-static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		dma_addr_t buf)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *dev = rx_queue->dev;
 	u32 lstatus;
 
 	bdp->bufPtr = buf;
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
 	eieio();
@@ -167,20 +168,25 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
 	int i;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
+	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
+	rx_queue->cur_rx = rx_queue->rx_bd_base;
+	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
+	rx_queue->skb_currx = 0;
 
 	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
+	txbdp = tx_queue->tx_bd_base;
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
 		txbdp->lstatus = 0;
 		txbdp->bufPtr = 0;
 		txbdp++;
@@ -190,12 +196,12 @@ static int gfar_init_bds(struct net_device *ndev)
 	txbdp--;
 	txbdp->status |= TXBD_WRAP;
 
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = priv->rx_skbuff[i];
+	rxbdp = rx_queue->rx_bd_base;
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct sk_buff *skb = rx_queue->rx_skbuff[i];
 
 		if (skb) {
-			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
+			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
 		} else {
 			skb = gfar_new_skb(ndev);
 			if (!skb) {
@@ -203,9 +209,9 @@ static int gfar_init_bds(struct net_device *ndev)
 					ndev->name);
 				return -ENOMEM;
 			}
-			priv->rx_skbuff[i] = skb;
+			rx_queue->rx_skbuff[i] = skb;
 
-			gfar_new_rxbdp(ndev, rxbdp, skb);
+			gfar_new_rxbdp(rx_queue, rxbdp, skb);
 		}
 
 		rxbdp++;
@@ -220,12 +226,17 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	int i;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
-			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
-			&priv->tx_bd_dma_base, GFP_KERNEL);
+			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
+			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
+			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,36 +244,38 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
-	priv->tx_bd_base = vaddr;
+	tx_queue->tx_bd_base = vaddr;
+	tx_queue->dev = ndev;
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
-	priv->rx_bd_base = vaddr;
+	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
+	rx_queue->rx_bd_base = vaddr;
+	rx_queue->dev = ndev;
 
 	/* Setup the skbuff rings */
-	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
-				  priv->tx_ring_size, GFP_KERNEL);
-	if (!priv->tx_skbuff) {
+	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				  tx_queue->tx_ring_size, GFP_KERNEL);
+	if (!tx_queue->tx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate tx_skbuff\n",
			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < tx_queue->tx_ring_size; i++)
+		tx_queue->tx_skbuff[i] = NULL;
 
-	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
-				  priv->rx_ring_size, GFP_KERNEL);
-	if (!priv->rx_skbuff) {
+	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				  rx_queue->rx_ring_size, GFP_KERNEL);
+	if (!rx_queue->rx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate rx_skbuff\n",
			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
+	for (i = 0; i < rx_queue->rx_ring_size; i++)
+		rx_queue->rx_skbuff[i] = NULL;
 
 	if (gfar_init_bds(ndev))
 		goto cleanup;
@@ -278,24 +291,29 @@ static void gfar_init_mac(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	u32 rctrl = 0;
 	u32 tctrl = 0;
 	u32 attrs = 0;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, priv->tx_bd_dma_base);
-	gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
-			sizeof(*priv->tx_bd_base) *
-			priv->tx_ring_size);
+	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
+	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
+			sizeof(*tx_queue->tx_bd_base) *
+			tx_queue->tx_ring_size);
 
 	/* Configure the coalescing support */
 	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
+	if (tx_queue->txcoalescing)
+		gfar_write(&regs->txic, tx_queue->txic);
 
 	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
+	if (rx_queue->rxcoalescing)
+		gfar_write(&regs->rxic, rx_queue->rxic);
 
 	if (priv->rx_csum_enable)
 		rctrl |= RCTRL_CHECKSUMMING;
@@ -414,7 +432,7 @@ static int gfar_of_init(struct net_device *dev)
 
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -519,8 +537,18 @@ static int gfar_probe(struct of_device *ofdev,
 	if (err)
 		goto regs_fail;
 
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
+	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
+				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+	if (!priv->tx_queue)
+		goto regs_fail;
+
+	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
+				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+	if (!priv->rx_queue)
+		goto rx_queue_fail;
+
+	spin_lock_init(&priv->tx_queue->txlock);
+	spin_lock_init(&priv->rx_queue->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
@@ -552,12 +580,13 @@ static int gfar_probe(struct of_device *ofdev,
 
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
-
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register for napi ...NAPI is for each rx_queue */
+	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -613,14 +642,16 @@ static int gfar_probe(struct of_device *ofdev,
 	dev->hard_header_len += GMAC_FCB_LEN;
 
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Initializing some of the rx/tx queue level parameters */
+	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
+	priv->tx_queue->txic = DEFAULT_TXIC;
+
+	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
+	priv->rx_queue->rxic = DEFAULT_RXIC;
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -666,12 +697,15 @@ static int gfar_probe(struct of_device *ofdev,
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	       dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
 
 	return 0;
 
 register_fail:
 	iounmap(priv->regs);
+	kfree(priv->rx_queue);
+rx_queue_fail:
+	kfree(priv->tx_queue);
 regs_fail:
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
@@ -705,6 +739,8 @@ static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
@@ -712,10 +748,12 @@ static int gfar_suspend(struct device *dev)
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	netif_device_detach(ndev);
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	if (netif_running(ndev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt_nodisable(ndev);
 
@@ -729,10 +767,10 @@ static int gfar_suspend(struct device *dev)
 
 		gfar_write(&priv->regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		napi_disable(&priv->napi);
+		napi_disable(&rx_queue->napi);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
@@ -754,6 +792,8 @@ static int gfar_resume(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
@@ -770,9 +810,11 @@ static int gfar_resume(struct device *dev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	tempval = gfar_read(&priv->regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
@@ -780,12 +822,12 @@ static int gfar_resume(struct device *dev)
 
 	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	napi_enable(&rx_queue->napi);
 
 	return 0;
 }
@@ -1060,18 +1102,23 @@ void gfar_halt(struct net_device *dev)
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
 	phy_stop(priv->phydev);
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -1092,46 +1139,50 @@ static void free_skb_resources(struct gfar_private *priv)
 	struct device *dev = &priv->ofdev->dev;
 	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i, j;
 
 	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	txbdp = tx_queue->tx_bd_base;
 
-	if (!priv->tx_skbuff)
+	if (!tx_queue->tx_skbuff)
 		goto skip_tx_skbuff;
 
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 				txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 					txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
 	}
 
-	kfree(priv->tx_skbuff);
+	kfree(tx_queue->tx_skbuff);
 skip_tx_skbuff:
 
-	rxbdp = priv->rx_bd_base;
+	rx_queue = priv->rx_queue;
+	rxbdp = rx_queue->rx_bd_base;
 
-	if (!priv->rx_skbuff)
+	if (!rx_queue->rx_skbuff)
 		goto skip_rx_skbuff;
 
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		if (priv->rx_skbuff[i]) {
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
 			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
 					priv->rx_buffer_size,
 					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-			priv->rx_skbuff[i] = NULL;
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
 		}
 
 		rxbdp->lstatus = 0;
@@ -1139,17 +1190,19 @@ skip_tx_skbuff:
 		rxbdp++;
 	}
 
-	kfree(priv->rx_skbuff);
+	kfree(rx_queue->rx_skbuff);
 skip_rx_skbuff:
 
-	dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-			  sizeof(*rxbdp) * priv->rx_ring_size,
-			  priv->tx_bd_base, priv->tx_bd_dma_base);
+	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
+			  sizeof(*rxbdp) * rx_queue->rx_ring_size,
+			  tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
@@ -1258,7 +1311,7 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	napi_enable(&priv->rx_queue->napi);
 
 	skb_queue_head_init(&priv->rx_recycle);
 
@@ -1269,14 +1322,14 @@ static int gfar_enet_open(struct net_device *dev)
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
@@ -1349,6 +1402,7 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
@@ -1357,7 +1411,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	base = tx_queue->tx_bd_base;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1433,21 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_stop_queue(dev);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1455,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
 
@@ -1442,7 +1497,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
@@ -1462,19 +1517,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
+	if (!tx_queue->num_txbdfree) {
 		netif_stop_queue(dev);
 
 		dev->stats.tx_fifo_errors++;
@@ -1484,7 +1539,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1494,7 +1549,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	napi_disable(&priv->rx_queue->napi);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1523,10 +1578,12 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 
 	priv->vlgrp = grp;
 
@@ -1560,7 +1617,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -1649,24 +1706,27 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue;
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -1698,29 +1758,29 @@ static int gfar_clean_tx_ring(struct net_device *dev)
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += frags + 1;
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
+	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
 		netif_wake_queue(dev);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
@@ -1730,14 +1790,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
-	if (napi_schedule_prep(&priv->napi)) {
+	if (napi_schedule_prep(&rx_queue->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+		__napi_schedule(&rx_queue->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
@@ -1746,8 +1810,8 @@ static void gfar_schedule_cleanup(struct net_device *dev)
 		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -1757,15 +1821,16 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	dma_addr_t buf;
 
 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
 			priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(dev, bdp, buf);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
 
@@ -1890,8 +1955,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  * until the budget/quota has been reached. Returns the number
  * of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1900,8 +1966,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1913,7 +1979,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,30 +2027,33 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-			(priv->skb_currx + 1) &
-			RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+			(rx_queue->skb_currx + 1) &
+			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
+	struct gfar_priv_rx_q *rx_queue = container_of(napi,
+			struct gfar_priv_rx_q, napi);
+	struct net_device *dev = rx_queue->dev;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	int tx_cleaned = 0;
 	int rx_cleaned = 0;
 	unsigned long flags;
@@ -1992,14 +2061,15 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
 	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+	tx_queue = priv->tx_queue;
 
 	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+		tx_cleaned = gfar_clean_tx_ring(tx_queue);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
+	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);
 
 	if (tx_cleaned)
 		return budget;
@@ -2014,13 +2084,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
+		if (likely(rx_queue->rxcoalescing)) {
 			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
+			gfar_write(&priv->regs->rxic, rx_queue->rxic);
 		}
-		if (likely(priv->txcoalescing)) {
+		if (likely(tx_queue->txcoalescing)) {
 			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
+			gfar_write(&priv->regs->txic, tx_queue->txic);
 		}
 	}
 
@@ -2087,12 +2157,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar __iomem *regs = priv->regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2158,7 +2230,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2172,7 +2244,7 @@ static void gfar_set_multi(struct net_device *dev)
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2184,7 +2256,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2308,7 @@ static void gfar_set_multi(struct net_device *dev)
 			em_num = 0;
 		}
 
-	if(dev->mc_count == 0)
+	if (dev->mc_count == 0)
 		return;
 
 	/* Parse the list, and set the appropriate bits */
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 05732faa2f90..a60f93f1ae07 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
@@ -699,6 +700,76 @@ struct gfar {
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING	0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 
+/**
+ * struct gfar_priv_tx_q - per tx queue structure
+ * @txlock: per queue tx spin lock
+ * @tx_skbuff: skb pointers
+ * @skb_curtx: to be used skb pointer
+ * @skb_dirtytx: the last used skb pointer
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @grp: back pointer to the group to which this queue belongs
+ * @tx_bd_base: First tx buffer descriptor
+ * @cur_tx: Next free ring entry
+ * @dirty_tx: First buffer in line to be transmitted
+ * @tx_ring_size: Tx ring size
+ * @num_txbdfree: number of free TxBds
+ * @txcoalescing: enable/disable tx coalescing
+ * @txic: transmit interrupt coalescing value
+ * @txcount: coalescing value if based on tx frame count
+ * @txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+	spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct sk_buff ** tx_skbuff;
+	/* Buffer descriptor pointers */
+	dma_addr_t tx_bd_dma_base;
+	struct txbd8 *tx_bd_base;
+	struct txbd8 *cur_tx;
+	struct txbd8 *dirty_tx;
+	struct net_device *dev;
+	u16 skb_curtx;
+	u16 skb_dirtytx;
+	u16 qindex;
+	unsigned int tx_ring_size;
+	unsigned int num_txbdfree;
+	/* Configuration info for the coalescing features */
+	unsigned char txcoalescing;
+	unsigned long txic;
+	unsigned short txcount;
+	unsigned short txtime;
+};
+
+/**
+ * struct gfar_priv_rx_q - per rx queue structure
+ * @rxlock: per queue rx spin lock
+ * @napi: the napi poll function
+ * @rx_skbuff: skb pointers
+ * @skb_currx: currently used skb pointer
+ * @rx_bd_base: First rx buffer descriptor
+ * @cur_rx: Next free rx ring entry
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @rx_ring_size: Rx ring size
+ * @rxcoalescing: enable/disable rx coalescing
+ * @rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+	spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct napi_struct napi;
+	struct sk_buff ** rx_skbuff;
+	struct rxbd8 *rx_bd_base;
+	struct rxbd8 *cur_rx;
+	struct net_device *dev;
+	u16 skb_currx;
+	u16 qindex;
+	unsigned int rx_ring_size;
+	/* RX Coalescing values */
+	unsigned char rxcoalescing;
+	unsigned long rxic;
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblence)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -709,52 +780,15 @@ struct gfar {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* Fields controlled by TX lock */
-	spinlock_t txlock;
-
-	/* Pointer to the array of skbuffs */
-	struct sk_buff ** tx_skbuff;
-
-	/* next free skb in the array */
-	u16 skb_curtx;
-
-	/* First skb in line to be transmitted */
-	u16 skb_dirtytx;
-
-	/* Configuration info for the coalescing features */
-	unsigned char txcoalescing;
-	unsigned long txic;
-
-	/* Buffer descriptor pointers */
-	dma_addr_t tx_bd_dma_base;
-	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
-	struct txbd8 *cur_tx;		/* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* First buffer in line
-					   to be transmitted */
-	unsigned int tx_ring_size;
-	unsigned int num_txbdfree;	/* number of TxBDs free */
-
-	/* RX Locked fields */
-	spinlock_t rxlock;
 
 	struct device_node *node;
 	struct net_device *ndev;
 	struct of_device *ofdev;
-	struct napi_struct napi;
-
-	/* skb array and index */
-	struct sk_buff ** rx_skbuff;
-	u16 skb_currx;
-
-	/* RX Coalescing values */
-	unsigned char rxcoalescing;
-	unsigned long rxic;
 
-	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
-	struct rxbd8 *cur_rx;		/* Next free rx ring entry */
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 
-	/* RX parameters */
-	unsigned int rx_ring_size;
+	/* RX per device parameters */
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..6d0d1714c2f2 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
 * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
- * Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
 #include "gianfar.h"
 
 extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
@@ -197,12 +198,16 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 
 	if (NULL == phydev)
 		return -ENODEV;
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
-	cmd->maxtxpkt = get_icft_value(priv->txic);
-	cmd->maxrxpkt = get_icft_value(priv->rxic);
+	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
 	return phy_ethtool_gset(phydev, cmd);
 }
@@ -279,6 +284,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long rxtime;
 	unsigned long rxcount;
 	unsigned long txtime;
@@ -290,10 +297,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 	if (NULL == priv->phydev)
 		return -ENODEV;
 
-	rxtime = get_ictt_value(priv->rxic);
-	rxcount = get_icft_value(priv->rxic);
-	txtime = get_ictt_value(priv->txic);
-	txcount = get_icft_value(priv->txic);
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
+
+	rxtime = get_ictt_value(rx_queue->rxic);
+	rxcount = get_icft_value(rx_queue->rxic);
+	txtime = get_ictt_value(tx_queue->txic);
+	txcount = get_icft_value(tx_queue->txic);
 	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 	cvals->rx_max_coalesced_frames = rxcount;
 
@@ -339,16 +349,21 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Set up rx coalescing */
 	if ((cvals->rx_coalesce_usecs == 0) ||
 	    (cvals->rx_max_coalesced_frames == 0))
-		priv->rxcoalescing = 0;
+		rx_queue->rxcoalescing = 0;
 	else
-		priv->rxcoalescing = 1;
+		rx_queue->rxcoalescing = 1;
 
 	if (NULL == priv->phydev)
 		return -ENODEV;
@@ -366,15 +381,15 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 		return -EINVAL;
 	}
 
-	priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
+	rx_queue->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
 		gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
 
 	/* Set up tx coalescing */
 	if ((cvals->tx_coalesce_usecs == 0) ||
 	    (cvals->tx_max_coalesced_frames == 0))
-		priv->txcoalescing = 0;
+		tx_queue->txcoalescing = 0;
 	else
-		priv->txcoalescing = 1;
+		tx_queue->txcoalescing = 1;
 
 	/* Check the bounds of the values */
 	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +404,16 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 		return -EINVAL;
 	}
 
-	priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
+	tx_queue->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
 		gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
 
 	gfar_write(&priv->regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&priv->regs->rxic, priv->rxic);
+	if (rx_queue->rxcoalescing)
+		gfar_write(&priv->regs->rxic, rx_queue->rxic);
 
 	gfar_write(&priv->regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&priv->regs->txic, priv->txic);
+	if (tx_queue->txcoalescing)
+		gfar_write(&priv->regs->txic, tx_queue->txic);
 
 	return 0;
 }
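The write sequence that closes gfar_scoalesce() is worth noting: rxic and txic are always cleared first and only rewritten when coalescing is enabled, so a disabled state leaves the register at zero rather than holding a stale value. From user space this path is driven by the standard ethtool knobs; for example `ethtool -C eth0 rx-usecs 0` (or rx-frames 0) disables rx coalescing per the checks above, and requested values are bounded by GFAR_MAX_COAL_USECS and GFAR_MAX_COAL_FRAMES.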
@@ -409,6 +424,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +438,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
 	/* Values changeable by the user. The valid values are
 	 * in the range 1 to the "*_max_pending" counterpart above.
 	 */
-	rvals->rx_pending = priv->rx_ring_size;
-	rvals->rx_mini_pending = priv->rx_ring_size;
-	rvals->rx_jumbo_pending = priv->rx_ring_size;
-	rvals->tx_pending = priv->tx_ring_size;
+	rvals->rx_pending = rx_queue->rx_ring_size;
+	rvals->rx_mini_pending = rx_queue->rx_ring_size;
+	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+	rvals->tx_pending = tx_queue->tx_ring_size;
 }
 
 /* Change the current ring parameters, stopping the controller if
@@ -431,6 +451,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int err = 0;
 
 	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
@@ -451,29 +473,32 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 		return -EINVAL;
 	}
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	if (dev->flags & IFF_UP) {
 		unsigned long flags;
 
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 	}
 
 	/* Change the size */
-	priv->rx_ring_size = rvals->rx_pending;
-	priv->tx_ring_size = rvals->tx_pending;
-	priv->num_txbdfree = priv->tx_ring_size;
+	rx_queue->rx_ring_size = rvals->rx_pending;
+	tx_queue->tx_ring_size = rvals->tx_pending;
+	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 
 	/* Rebuild the rings with the new size */
 	if (dev->flags & IFF_UP) {
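When the interface is up, gfar_sringparam() follows a fixed sequence: take the per-queue locks, gfar_halt() the controller, drop the locks, drain anything already received with gfar_clean_rx_ring() (budgeted at the full ring size), stop_gfar() to tear the rings down, record the new sizes (resetting num_txbdfree to a full tx ring), and only then rebuild. From user space this corresponds to e.g. `ethtool -G eth0 rx 512 tx 256`, with the requested sizes bounded by the *_MAX_RING_SIZE checks earlier in the function.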
@@ -486,24 +511,29 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long flags;
 	int err = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
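This halt-and-drain preamble is now byte-for-byte identical in gfar_sringparam() and gfar_set_rx_csum(). A hypothetical helper (not part of this patch; the name is invented for illustration) shows how it could be factored now that it operates on the per-queue structures:

	/* Hypothetical helper, sketched from the two call sites above. */
	static void gfar_halt_and_drain(struct net_device *dev,
					struct gfar_priv_tx_q *tx_queue,
					struct gfar_priv_rx_q *rx_queue)
	{
		unsigned long flags;

		spin_lock_irqsave(&tx_queue->txlock, flags);
		spin_lock(&rx_queue->rxlock);

		gfar_halt(dev);		/* stop DMA on the controller */

		spin_unlock(&rx_queue->rxlock);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);

		/* process frames that arrived before the halt */
		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);

		stop_gfar(dev);		/* take the rings down for rebuild */
	}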
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da74f27a..9c664f85705c 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 				 const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int new_setting = 0;
 	u32 temp;
 	unsigned long flags;
@@ -56,6 +58,8 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
 		return count;
 
+	rx_queue = priv->rx_queue;
+
 	/* Find out the new setting */
 	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
 		new_setting = 1;
@@ -65,7 +69,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 	else
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
@@ -79,7 +83,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 
 	gfar_write(&priv->regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
 	return count;
 }
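gfar_set_bd_stash() is a sysfs store callback; the diff only swaps which lock it takes. For context, a sketch of the usual wiring for such an attribute (gianfar_sysfs.c uses the standard DEVICE_ATTR mechanism; the show-handler name below is assumed, since it is not visible in this hunk):

	/* Sketch: conventional sysfs wiring for the store handler above.
	 * gfar_show_bd_stash is assumed; it does not appear in this diff. */
	static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);

	/* created against the net_device's embedded struct device: */
	device_create_file(&dev->dev, &dev_attr_bd_stash);

Assuming that wiring, `echo on > /sys/class/net/eth0/bd_stash` (or `off`/`0`) exercises the handler.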
@@ -99,6 +103,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 				      const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -106,7 +111,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 	if (length > priv->rx_buffer_size)
 		goto out;
 
@@ -131,7 +138,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 	gfar_write(&priv->regs->attr, temp);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
 	return count;
 }
@@ -154,6 +161,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned short index = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -161,7 +169,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 	if (index > priv->rx_stash_size)
 		goto out;
 
@@ -176,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	gfar_write(&priv->regs->attreli, flags);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
 	return count;
 }
@@ -198,6 +208,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -205,7 +216,9 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	priv->fifo_threshold = length;
 
@@ -214,7 +227,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
 	temp |= length;
 	gfar_write(&priv->regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return count;
 }
@@ -235,6 +248,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 				    const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -242,7 +256,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	priv->fifo_starve = num;
 
@@ -251,7 +266,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
 	temp |= num;
 	gfar_write(&priv->regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return count;
 }
@@ -273,6 +288,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 					const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -280,7 +296,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	priv->fifo_starve_off = num;
 
@@ -289,7 +306,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
 	temp |= num;
 	gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return count;
 }
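Taken together, the sysfs conversions follow one pattern: the rx/tx spinlocks move into the queue structures while much of the state they guard (bd_stash_en, fifo_threshold, fifo_starve, fifo_starve_off) stays in gfar_private. With the single tx_queue/rx_queue pointers this patch introduces, that is behaviorally equivalent to the old priv-wide locks, and it puts the locks where they will need to be once multiple queues exist. The shape every converted call site now takes, as an illustration (this function is not in the patch):

	/* Illustrative only: the access pattern the diff introduces. */
	static void gfar_touch_tx_state(struct gfar_private *priv)
	{
		struct gfar_priv_tx_q *tx_queue = priv->tx_queue;
		unsigned long flags;

		/* the lock now lives in the queue, not in gfar_private */
		spin_lock_irqsave(&tx_queue->txlock, flags);
		/* ... update tx-side state; note that several sysfs handlers
		 * above still write priv-> fields under this per-queue lock ... */
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}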