author     Kumar Gala <galak@freescale.com>      2005-06-20 11:54:21 -0400
committer  Jeff Garzik <jgarzik@pobox.com>       2005-06-27 00:40:33 -0400
commit     0bbaf069f053957e8d733784e18a2992afd1dd3c (patch)
tree       5fd2250138b0486aaa5b135e70afe551b92d8374
parent     be83668a253149d99085ca4afe6cd8dc8a43fcd0 (diff)
[PATCH] gianfar: Add support for enhanced TSEC features on the MPC 8548
Jeff,

Just in case this got lost in the recent netdev mailing list transition,
here is a nicer version of Andy's patch for gianfar.

- kumar

 * TCP/IP/UDP checksumming and verification
 * VLAN tag insertion/extraction
 * Larger multicast hash-table
 * Padding to align IP headers

Also added:
 * msg lvl support
 * Some whitespace cleanup

Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
-rw-r--r--  drivers/net/gianfar.c          | 652
-rw-r--r--  drivers/net/gianfar.h          | 363
-rw-r--r--  drivers/net/gianfar_ethtool.c  | 277
3 files changed, 902 insertions(+), 390 deletions(-)
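The feature list above boils down to a handful of new RCTRL/TCTRL bits that the patch programs in startup_gfar(). The following is a minimal standalone C model (not driver code) of how those control words are composed; the bit values are copied from the gianfar.h hunk below, and the flag variables stand in for fields of struct gfar_private.

/* Standalone model of the RCTRL/TCTRL composition in startup_gfar(),
 * using the bit values this patch adds to gianfar.h. */
#include <stdio.h>
#include <stdint.h>

#define RCTRL_GHTX         0x00000400
#define RCTRL_IPCSEN       0x00000200
#define RCTRL_TUCSEN       0x00000100
#define RCTRL_PRSDEP_INIT  0x000000c0
#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT)
#define RCTRL_EXTHASH      (RCTRL_GHTX)
#define RCTRL_VLAN         (RCTRL_PRSDEP_INIT)

#define TCTRL_IPCSEN       0x00004000
#define TCTRL_TUCSEN       0x00002000
#define TCTRL_INIT_CSUM    (TCTRL_TUCSEN | TCTRL_IPCSEN)

int main(void)
{
	int rx_csum_enable = 1, extended_hash = 1, vlan_enable = 1, tx_csum = 1;
	uint32_t rctrl = 0, tctrl = 0;

	if (rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;	/* IP/TCP/UDP rx checksums + parser */
	if (extended_hash)
		rctrl |= RCTRL_EXTHASH;		/* 512-bit group hash table */
	if (vlan_enable)
		rctrl |= RCTRL_VLAN;		/* parser must run to extract VLAN tags */
	if (tx_csum)
		tctrl |= TCTRL_INIT_CSUM;	/* IP and TCP/UDP tx checksum generation */

	printf("rctrl = 0x%08x, tctrl = 0x%08x\n",
	       (unsigned int)rctrl, (unsigned int)tctrl);
	return 0;
}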
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b43b2b11aacd..6518334b9280 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * drivers/net/gianfar.c 2 * drivers/net/gianfar.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
@@ -22,10 +22,9 @@
22 * B-V +1.62 22 * B-V +1.62
23 * 23 *
24 * Theory of operation 24 * Theory of operation
25 * This driver is designed for the Triple-speed Ethernet 25 * This driver is designed for the non-CPM ethernet controllers
26 * controllers on the Freescale 8540/8560 integrated processors, 26 * on the 85xx and 83xx family of integrated processors
27 * as well as the Fast Ethernet Controller on the 8540. 27 *
28 *
29 * The driver is initialized through platform_device. Structures which 28 * The driver is initialized through platform_device. Structures which
30 * define the configuration needed by the board are defined in a 29 * define the configuration needed by the board are defined in a
31 * board structure in arch/ppc/platforms (though I do not 30 * board structure in arch/ppc/platforms (though I do not
@@ -39,12 +38,12 @@
39 * 38 *
40 * The Gianfar Ethernet Controller uses a ring of buffer 39 * The Gianfar Ethernet Controller uses a ring of buffer
41 * descriptors. The beginning is indicated by a register 40 * descriptors. The beginning is indicated by a register
42 * pointing to the physical address of the start of the ring. 41 * pointing to the physical address of the start of the ring.
43 * The end is determined by a "wrap" bit being set in the 42 * The end is determined by a "wrap" bit being set in the
44 * last descriptor of the ring. 43 * last descriptor of the ring.
45 * 44 *
46 * When a packet is received, the RXF bit in the 45 * When a packet is received, the RXF bit in the
47 * IEVENT register is set, triggering an interrupt when the 46 * IEVENT register is set, triggering an interrupt when the
48 * corresponding bit in the IMASK register is also set (if 47 * corresponding bit in the IMASK register is also set (if
49 * interrupt coalescing is active, then the interrupt may not 48 * interrupt coalescing is active, then the interrupt may not
50 * happen immediately, but will wait until either a set number 49 * happen immediately, but will wait until either a set number
@@ -52,7 +51,7 @@
52 * interrupt handler will signal there is work to be done, and 51 * interrupt handler will signal there is work to be done, and
53 * exit. Without NAPI, the packet(s) will be handled 52 * exit. Without NAPI, the packet(s) will be handled
54 * immediately. Both methods will start at the last known empty 53 * immediately. Both methods will start at the last known empty
55 * descriptor, and process every subsequent descriptor until there 54 * descriptor, and process every subsequent descriptor until there
56 * are none left with data (NAPI will stop after a set number of 55 * are none left with data (NAPI will stop after a set number of
57 * packets to give time to other tasks, but will eventually 56 * packets to give time to other tasks, but will eventually
58 * process all the packets). The data arrives inside a 57 * process all the packets). The data arrives inside a
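The "Theory of operation" comment above describes a ring of buffer descriptors terminated by a wrap bit, processed from the last known empty descriptor until an empty one is reached. The sketch below is a standalone C model of that walk only; the EMPTY/WRAP values and struct layout are illustrative stand-ins, not the hardware's RXBD_* encodings from gianfar.h.

/* Standalone sketch of the descriptor-ring walk described above. */
#include <stdio.h>
#include <stdint.h>

#define BD_EMPTY 0x8000   /* descriptor still owned by the controller (illustrative) */
#define BD_WRAP  0x2000   /* last descriptor: ring wraps back to the start (illustrative) */

struct rxbd { uint16_t status; uint16_t length; };

static void clean_ring(struct rxbd *ring, struct rxbd **cur)
{
	struct rxbd *bdp = *cur;

	/* Process every filled descriptor, starting at the last known
	 * empty one, until we hit one the controller still owns. */
	while (!(bdp->status & BD_EMPTY)) {
		printf("frame of %u bytes\n", (unsigned int)bdp->length);

		/* Hand the descriptor back and advance, honouring WRAP. */
		uint16_t wrap = bdp->status & BD_WRAP;
		bdp->status = BD_EMPTY | wrap;
		bdp = wrap ? ring : bdp + 1;
	}
	*cur = bdp;
}

int main(void)
{
	struct rxbd ring[4] = {
		{ 0, 60 }, { 0, 1514 }, { BD_EMPTY, 0 }, { BD_EMPTY | BD_WRAP, 0 },
	};
	struct rxbd *cur = ring;

	clean_ring(ring, &cur);
	return 0;
}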
@@ -83,9 +82,13 @@
83#include <linux/netdevice.h> 82#include <linux/netdevice.h>
84#include <linux/etherdevice.h> 83#include <linux/etherdevice.h>
85#include <linux/skbuff.h> 84#include <linux/skbuff.h>
85#include <linux/if_vlan.h>
86#include <linux/spinlock.h> 86#include <linux/spinlock.h>
87#include <linux/mm.h> 87#include <linux/mm.h>
88#include <linux/device.h> 88#include <linux/device.h>
89#include <linux/ip.h>
90#include <linux/tcp.h>
91#include <linux/udp.h>
89 92
90#include <asm/io.h> 93#include <asm/io.h>
91#include <asm/irq.h> 94#include <asm/irq.h>
@@ -123,7 +126,7 @@ static int gfar_set_mac_address(struct net_device *dev);
123static int gfar_change_mtu(struct net_device *dev, int new_mtu); 126static int gfar_change_mtu(struct net_device *dev, int new_mtu);
124static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs); 127static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
125static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs); 128static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
126irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); 129static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
127static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs); 130static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
128static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs); 131static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
129static void gfar_phy_change(void *data); 132static void gfar_phy_change(void *data);
@@ -139,9 +142,12 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
139#ifdef CONFIG_GFAR_NAPI 142#ifdef CONFIG_GFAR_NAPI
140static int gfar_poll(struct net_device *dev, int *budget); 143static int gfar_poll(struct net_device *dev, int *budget);
141#endif 144#endif
142static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 145int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
143static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); 146static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
144static void gfar_phy_startup_timer(unsigned long data); 147static void gfar_phy_startup_timer(unsigned long data);
148static void gfar_vlan_rx_register(struct net_device *netdev,
149 struct vlan_group *grp);
150static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
145 151
146extern struct ethtool_ops gfar_ethtool_ops; 152extern struct ethtool_ops gfar_ethtool_ops;
147 153
@@ -149,6 +155,13 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
149MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 155MODULE_DESCRIPTION("Gianfar Ethernet Driver");
150MODULE_LICENSE("GPL"); 156MODULE_LICENSE("GPL");
151 157
158int gfar_uses_fcb(struct gfar_private *priv)
159{
160 if (priv->vlan_enable || priv->rx_csum_enable)
161 return 1;
162 else
163 return 0;
164}
152static int gfar_probe(struct device *device) 165static int gfar_probe(struct device *device)
153{ 166{
154 u32 tempval; 167 u32 tempval;
@@ -159,7 +172,6 @@ static int gfar_probe(struct device *device)
159 struct resource *r; 172 struct resource *r;
160 int idx; 173 int idx;
161 int err = 0; 174 int err = 0;
162 int dev_ethtool_ops = 0;
163 175
164 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 176 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
165 177
@@ -265,15 +277,69 @@ static int gfar_probe(struct device *device)
265 dev->mtu = 1500; 277 dev->mtu = 1500;
266 dev->set_multicast_list = gfar_set_multi; 278 dev->set_multicast_list = gfar_set_multi;
267 279
268 /* Index into the array of possible ethtool 280 dev->ethtool_ops = &gfar_ethtool_ops;
269 * ops to catch all 4 possibilities */ 281
270 if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) == 0) 282 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
271 dev_ethtool_ops += 1; 283 priv->rx_csum_enable = 1;
284 dev->features |= NETIF_F_IP_CSUM;
285 } else
286 priv->rx_csum_enable = 0;
287
288 priv->vlgrp = NULL;
272 289
273 if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE) == 0) 290 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
274 dev_ethtool_ops += 2; 291 dev->vlan_rx_register = gfar_vlan_rx_register;
292 dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;
275 293
276 dev->ethtool_ops = gfar_op_array[dev_ethtool_ops]; 294 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
295
296 priv->vlan_enable = 1;
297 }
298
299 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
300 priv->extended_hash = 1;
301 priv->hash_width = 9;
302
303 priv->hash_regs[0] = &priv->regs->igaddr0;
304 priv->hash_regs[1] = &priv->regs->igaddr1;
305 priv->hash_regs[2] = &priv->regs->igaddr2;
306 priv->hash_regs[3] = &priv->regs->igaddr3;
307 priv->hash_regs[4] = &priv->regs->igaddr4;
308 priv->hash_regs[5] = &priv->regs->igaddr5;
309 priv->hash_regs[6] = &priv->regs->igaddr6;
310 priv->hash_regs[7] = &priv->regs->igaddr7;
311 priv->hash_regs[8] = &priv->regs->gaddr0;
312 priv->hash_regs[9] = &priv->regs->gaddr1;
313 priv->hash_regs[10] = &priv->regs->gaddr2;
314 priv->hash_regs[11] = &priv->regs->gaddr3;
315 priv->hash_regs[12] = &priv->regs->gaddr4;
316 priv->hash_regs[13] = &priv->regs->gaddr5;
317 priv->hash_regs[14] = &priv->regs->gaddr6;
318 priv->hash_regs[15] = &priv->regs->gaddr7;
319
320 } else {
321 priv->extended_hash = 0;
322 priv->hash_width = 8;
323
324 priv->hash_regs[0] = &priv->regs->gaddr0;
325 priv->hash_regs[1] = &priv->regs->gaddr1;
326 priv->hash_regs[2] = &priv->regs->gaddr2;
327 priv->hash_regs[3] = &priv->regs->gaddr3;
328 priv->hash_regs[4] = &priv->regs->gaddr4;
329 priv->hash_regs[5] = &priv->regs->gaddr5;
330 priv->hash_regs[6] = &priv->regs->gaddr6;
331 priv->hash_regs[7] = &priv->regs->gaddr7;
332 }
333
334 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
335 priv->padding = DEFAULT_PADDING;
336 else
337 priv->padding = 0;
338
339 dev->hard_header_len += priv->padding;
340
341 if (dev->features & NETIF_F_IP_CSUM)
342 dev->hard_header_len += GMAC_FCB_LEN;
277 343
278 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 344 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
279#ifdef CONFIG_GFAR_BUFSTASH 345#ifdef CONFIG_GFAR_BUFSTASH
@@ -289,6 +355,9 @@ static int gfar_probe(struct device *device)
289 priv->rxcount = DEFAULT_RXCOUNT; 355 priv->rxcount = DEFAULT_RXCOUNT;
290 priv->rxtime = DEFAULT_RXTIME; 356 priv->rxtime = DEFAULT_RXTIME;
291 357
358 /* Enable most messages by default */
359 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
360
292 err = register_netdev(dev); 361 err = register_netdev(dev);
293 362
294 if (err) { 363 if (err) {
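The "(NETIF_MSG_IFUP << 1) - 1" expression added above turns on every message class up to and including IFUP. A quick standalone check of that mask arithmetic follows, assuming the standard NETIF_MSG_* bit values from <linux/netdevice.h>.

/* Model of the default msg_enable mask set in gfar_probe() above. */
#include <stdio.h>

#define NETIF_MSG_DRV    0x0001
#define NETIF_MSG_PROBE  0x0002
#define NETIF_MSG_LINK   0x0004
#define NETIF_MSG_TIMER  0x0008
#define NETIF_MSG_IFDOWN 0x0010
#define NETIF_MSG_IFUP   0x0020
#define NETIF_MSG_RX_ERR 0x0040

int main(void)
{
	unsigned int msg_enable = (NETIF_MSG_IFUP << 1) - 1;   /* 0x003f */

	/* Every class at or below IFUP is on; RX_ERR and above stay off,
	 * so the netif_msg_rx_err()-guarded printks added by this patch
	 * are silent unless the user raises the message level. */
	printf("msg_enable = 0x%04x\n", msg_enable);
	printf("ifup  on: %d\n", !!(msg_enable & NETIF_MSG_IFUP));
	printf("rxerr on: %d\n", !!(msg_enable & NETIF_MSG_RX_ERR));
	return 0;
}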
@@ -360,8 +429,9 @@ static int init_phy(struct net_device *dev)
360 GFP_KERNEL); 429 GFP_KERNEL);
361 430
362 if(NULL == mii_info) { 431 if(NULL == mii_info) {
363 printk(KERN_ERR "%s: Could not allocate mii_info\n", 432 if (netif_msg_ifup(priv))
364 dev->name); 433 printk(KERN_ERR "%s: Could not allocate mii_info\n",
434 dev->name);
365 return -ENOMEM; 435 return -ENOMEM;
366 } 436 }
367 437
@@ -410,7 +480,8 @@ static int init_phy(struct net_device *dev)
410 curphy = get_phy_info(priv->mii_info); 480 curphy = get_phy_info(priv->mii_info);
411 481
412 if (curphy == NULL) { 482 if (curphy == NULL) {
413 printk(KERN_ERR "%s: No PHY found\n", dev->name); 483 if (netif_msg_ifup(priv))
484 printk(KERN_ERR "%s: No PHY found\n", dev->name);
414 err = -1; 485 err = -1;
415 goto no_phy; 486 goto no_phy;
416 } 487 }
@@ -421,7 +492,7 @@ static int init_phy(struct net_device *dev)
421 if(curphy->init) { 492 if(curphy->init) {
422 err = curphy->init(priv->mii_info); 493 err = curphy->init(priv->mii_info);
423 494
424 if (err) 495 if (err)
425 goto phy_init_fail; 496 goto phy_init_fail;
426 } 497 }
427 498
@@ -446,14 +517,14 @@ static void init_registers(struct net_device *dev)
446 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 517 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
447 518
448 /* Init hash registers to zero */ 519 /* Init hash registers to zero */
449 gfar_write(&priv->regs->iaddr0, 0); 520 gfar_write(&priv->regs->igaddr0, 0);
450 gfar_write(&priv->regs->iaddr1, 0); 521 gfar_write(&priv->regs->igaddr1, 0);
451 gfar_write(&priv->regs->iaddr2, 0); 522 gfar_write(&priv->regs->igaddr2, 0);
452 gfar_write(&priv->regs->iaddr3, 0); 523 gfar_write(&priv->regs->igaddr3, 0);
453 gfar_write(&priv->regs->iaddr4, 0); 524 gfar_write(&priv->regs->igaddr4, 0);
454 gfar_write(&priv->regs->iaddr5, 0); 525 gfar_write(&priv->regs->igaddr5, 0);
455 gfar_write(&priv->regs->iaddr6, 0); 526 gfar_write(&priv->regs->igaddr6, 0);
456 gfar_write(&priv->regs->iaddr7, 0); 527 gfar_write(&priv->regs->igaddr7, 0);
457 528
458 gfar_write(&priv->regs->gaddr0, 0); 529 gfar_write(&priv->regs->gaddr0, 0);
459 gfar_write(&priv->regs->gaddr1, 0); 530 gfar_write(&priv->regs->gaddr1, 0);
@@ -464,9 +535,6 @@ static void init_registers(struct net_device *dev)
464 gfar_write(&priv->regs->gaddr6, 0); 535 gfar_write(&priv->regs->gaddr6, 0);
465 gfar_write(&priv->regs->gaddr7, 0); 536 gfar_write(&priv->regs->gaddr7, 0);
466 537
467 /* Zero out rctrl */
468 gfar_write(&priv->regs->rctrl, 0x00000000);
469
470 /* Zero out the rmon mib registers if it has them */ 538 /* Zero out the rmon mib registers if it has them */
471 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 539 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
472 memset((void *) &(priv->regs->rmon), 0, 540 memset((void *) &(priv->regs->rmon), 0,
@@ -497,20 +565,14 @@ static void init_registers(struct net_device *dev)
497 gfar_write(&priv->regs->tbipa, TBIPA_VALUE); 565 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
498} 566}
499 567
500void stop_gfar(struct net_device *dev) 568
569/* Halt the receive and transmit queues */
570void gfar_halt(struct net_device *dev)
501{ 571{
502 struct gfar_private *priv = netdev_priv(dev); 572 struct gfar_private *priv = netdev_priv(dev);
503 struct gfar *regs = priv->regs; 573 struct gfar *regs = priv->regs;
504 unsigned long flags;
505 u32 tempval; 574 u32 tempval;
506 575
507 /* Lock it down */
508 spin_lock_irqsave(&priv->lock, flags);
509
510 /* Tell the kernel the link is down */
511 priv->mii_info->link = 0;
512 adjust_link(dev);
513
514 /* Mask all interrupts */ 576 /* Mask all interrupts */
515 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 577 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
516 578
@@ -533,13 +595,29 @@ void stop_gfar(struct net_device *dev)
533 tempval = gfar_read(&regs->maccfg1); 595 tempval = gfar_read(&regs->maccfg1);
534 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 596 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
535 gfar_write(&regs->maccfg1, tempval); 597 gfar_write(&regs->maccfg1, tempval);
598}
599
600void stop_gfar(struct net_device *dev)
601{
602 struct gfar_private *priv = netdev_priv(dev);
603 struct gfar *regs = priv->regs;
604 unsigned long flags;
605
606 /* Lock it down */
607 spin_lock_irqsave(&priv->lock, flags);
608
609 /* Tell the kernel the link is down */
610 priv->mii_info->link = 0;
611 adjust_link(dev);
612
613 gfar_halt(dev);
536 614
537 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) { 615 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
538 /* Clear any pending interrupts */ 616 /* Clear any pending interrupts */
539 mii_clear_phy_interrupt(priv->mii_info); 617 mii_clear_phy_interrupt(priv->mii_info);
540 618
541 /* Disable PHY Interrupts */ 619 /* Disable PHY Interrupts */
542 mii_configure_phy_interrupt(priv->mii_info, 620 mii_configure_phy_interrupt(priv->mii_info,
543 MII_INTERRUPT_DISABLED); 621 MII_INTERRUPT_DISABLED);
544 } 622 }
545 623
@@ -566,7 +644,7 @@ void stop_gfar(struct net_device *dev)
566 sizeof(struct txbd8)*priv->tx_ring_size 644 sizeof(struct txbd8)*priv->tx_ring_size
567 + sizeof(struct rxbd8)*priv->rx_ring_size, 645 + sizeof(struct rxbd8)*priv->rx_ring_size,
568 priv->tx_bd_base, 646 priv->tx_bd_base,
569 gfar_read(&regs->tbase)); 647 gfar_read(&regs->tbase0));
570} 648}
571 649
572/* If there are any tx skbs or rx skbs still around, free them. 650/* If there are any tx skbs or rx skbs still around, free them.
@@ -620,6 +698,34 @@ void free_skb_resources(struct gfar_private *priv)
620 } 698 }
621} 699}
622 700
701void gfar_start(struct net_device *dev)
702{
703 struct gfar_private *priv = netdev_priv(dev);
704 struct gfar *regs = priv->regs;
705 u32 tempval;
706
707 /* Enable Rx and Tx in MACCFG1 */
708 tempval = gfar_read(&regs->maccfg1);
709 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
710 gfar_write(&regs->maccfg1, tempval);
711
712 /* Initialize DMACTRL to have WWR and WOP */
713 tempval = gfar_read(&priv->regs->dmactrl);
714 tempval |= DMACTRL_INIT_SETTINGS;
715 gfar_write(&priv->regs->dmactrl, tempval);
716
717 /* Clear THLT, so that the DMA starts polling now */
718 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
719
720 /* Make sure we aren't stopped */
721 tempval = gfar_read(&priv->regs->dmactrl);
722 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
723 gfar_write(&priv->regs->dmactrl, tempval);
724
725 /* Unmask the interrupts we look for */
726 gfar_write(&regs->imask, IMASK_DEFAULT);
727}
728
623/* Bring the controller up and running */ 729/* Bring the controller up and running */
624int startup_gfar(struct net_device *dev) 730int startup_gfar(struct net_device *dev)
625{ 731{
@@ -630,33 +736,34 @@ int startup_gfar(struct net_device *dev)
630 int i; 736 int i;
631 struct gfar_private *priv = netdev_priv(dev); 737 struct gfar_private *priv = netdev_priv(dev);
632 struct gfar *regs = priv->regs; 738 struct gfar *regs = priv->regs;
633 u32 tempval;
634 int err = 0; 739 int err = 0;
740 u32 rctrl = 0;
635 741
636 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 742 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
637 743
638 /* Allocate memory for the buffer descriptors */ 744 /* Allocate memory for the buffer descriptors */
639 vaddr = (unsigned long) dma_alloc_coherent(NULL, 745 vaddr = (unsigned long) dma_alloc_coherent(NULL,
640 sizeof (struct txbd8) * priv->tx_ring_size + 746 sizeof (struct txbd8) * priv->tx_ring_size +
641 sizeof (struct rxbd8) * priv->rx_ring_size, 747 sizeof (struct rxbd8) * priv->rx_ring_size,
642 &addr, GFP_KERNEL); 748 &addr, GFP_KERNEL);
643 749
644 if (vaddr == 0) { 750 if (vaddr == 0) {
645 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", 751 if (netif_msg_ifup(priv))
646 dev->name); 752 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
753 dev->name);
647 return -ENOMEM; 754 return -ENOMEM;
648 } 755 }
649 756
650 priv->tx_bd_base = (struct txbd8 *) vaddr; 757 priv->tx_bd_base = (struct txbd8 *) vaddr;
651 758
652 /* enet DMA only understands physical addresses */ 759 /* enet DMA only understands physical addresses */
653 gfar_write(&regs->tbase, addr); 760 gfar_write(&regs->tbase0, addr);
654 761
655 /* Start the rx descriptor ring where the tx ring leaves off */ 762 /* Start the rx descriptor ring where the tx ring leaves off */
656 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; 763 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
657 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size; 764 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
658 priv->rx_bd_base = (struct rxbd8 *) vaddr; 765 priv->rx_bd_base = (struct rxbd8 *) vaddr;
659 gfar_write(&regs->rbase, addr); 766 gfar_write(&regs->rbase0, addr);
660 767
661 /* Setup the skbuff rings */ 768 /* Setup the skbuff rings */
662 priv->tx_skbuff = 769 priv->tx_skbuff =
@@ -664,8 +771,9 @@ int startup_gfar(struct net_device *dev)
664 priv->tx_ring_size, GFP_KERNEL); 771 priv->tx_ring_size, GFP_KERNEL);
665 772
666 if (priv->tx_skbuff == NULL) { 773 if (priv->tx_skbuff == NULL) {
667 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", 774 if (netif_msg_ifup(priv))
668 dev->name); 775 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
776 dev->name);
669 err = -ENOMEM; 777 err = -ENOMEM;
670 goto tx_skb_fail; 778 goto tx_skb_fail;
671 } 779 }
@@ -678,8 +786,9 @@ int startup_gfar(struct net_device *dev)
678 priv->rx_ring_size, GFP_KERNEL); 786 priv->rx_ring_size, GFP_KERNEL);
679 787
680 if (priv->rx_skbuff == NULL) { 788 if (priv->rx_skbuff == NULL) {
681 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", 789 if (netif_msg_ifup(priv))
682 dev->name); 790 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
791 dev->name);
683 err = -ENOMEM; 792 err = -ENOMEM;
684 goto rx_skb_fail; 793 goto rx_skb_fail;
685 } 794 }
@@ -726,12 +835,13 @@ int startup_gfar(struct net_device *dev)
726 /* If the device has multiple interrupts, register for 835 /* If the device has multiple interrupts, register for
727 * them. Otherwise, only register for the one */ 836 * them. Otherwise, only register for the one */
728 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 837 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
729 /* Install our interrupt handlers for Error, 838 /* Install our interrupt handlers for Error,
730 * Transmit, and Receive */ 839 * Transmit, and Receive */
731 if (request_irq(priv->interruptError, gfar_error, 840 if (request_irq(priv->interruptError, gfar_error,
732 0, "enet_error", dev) < 0) { 841 0, "enet_error", dev) < 0) {
733 printk(KERN_ERR "%s: Can't get IRQ %d\n", 842 if (netif_msg_intr(priv))
734 dev->name, priv->interruptError); 843 printk(KERN_ERR "%s: Can't get IRQ %d\n",
844 dev->name, priv->interruptError);
735 845
736 err = -1; 846 err = -1;
737 goto err_irq_fail; 847 goto err_irq_fail;
@@ -739,8 +849,9 @@ int startup_gfar(struct net_device *dev)
739 849
740 if (request_irq(priv->interruptTransmit, gfar_transmit, 850 if (request_irq(priv->interruptTransmit, gfar_transmit,
741 0, "enet_tx", dev) < 0) { 851 0, "enet_tx", dev) < 0) {
742 printk(KERN_ERR "%s: Can't get IRQ %d\n", 852 if (netif_msg_intr(priv))
743 dev->name, priv->interruptTransmit); 853 printk(KERN_ERR "%s: Can't get IRQ %d\n",
854 dev->name, priv->interruptTransmit);
744 855
745 err = -1; 856 err = -1;
746 857
@@ -749,8 +860,9 @@ int startup_gfar(struct net_device *dev)
749 860
750 if (request_irq(priv->interruptReceive, gfar_receive, 861 if (request_irq(priv->interruptReceive, gfar_receive,
751 0, "enet_rx", dev) < 0) { 862 0, "enet_rx", dev) < 0) {
752 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 863 if (netif_msg_intr(priv))
753 dev->name, priv->interruptReceive); 864 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
865 dev->name, priv->interruptReceive);
754 866
755 err = -1; 867 err = -1;
756 goto rx_irq_fail; 868 goto rx_irq_fail;
@@ -758,8 +870,9 @@ int startup_gfar(struct net_device *dev)
758 } else { 870 } else {
759 if (request_irq(priv->interruptTransmit, gfar_interrupt, 871 if (request_irq(priv->interruptTransmit, gfar_interrupt,
760 0, "gfar_interrupt", dev) < 0) { 872 0, "gfar_interrupt", dev) < 0) {
761 printk(KERN_ERR "%s: Can't get IRQ %d\n", 873 if (netif_msg_intr(priv))
762 dev->name, priv->interruptError); 874 printk(KERN_ERR "%s: Can't get IRQ %d\n",
875 dev->name, priv->interruptError);
763 876
764 err = -1; 877 err = -1;
765 goto err_irq_fail; 878 goto err_irq_fail;
@@ -787,28 +900,22 @@ int startup_gfar(struct net_device *dev)
787 else 900 else
788 gfar_write(&regs->rxic, 0); 901 gfar_write(&regs->rxic, 0);
789 902
790 init_waitqueue_head(&priv->rxcleanupq); 903 if (priv->rx_csum_enable)
904 rctrl |= RCTRL_CHECKSUMMING;
791 905
792 /* Enable Rx and Tx in MACCFG1 */ 906 if (priv->extended_hash)
793 tempval = gfar_read(&regs->maccfg1); 907 rctrl |= RCTRL_EXTHASH;
794 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
795 gfar_write(&regs->maccfg1, tempval);
796 908
797 /* Initialize DMACTRL to have WWR and WOP */ 909 if (priv->vlan_enable)
798 tempval = gfar_read(&priv->regs->dmactrl); 910 rctrl |= RCTRL_VLAN;
799 tempval |= DMACTRL_INIT_SETTINGS;
800 gfar_write(&priv->regs->dmactrl, tempval);
801 911
802 /* Clear THLT, so that the DMA starts polling now */ 912 /* Init rctrl based on our settings */
803 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 913 gfar_write(&priv->regs->rctrl, rctrl);
804 914
805 /* Make sure we aren't stopped */ 915 if (dev->features & NETIF_F_IP_CSUM)
806 tempval = gfar_read(&priv->regs->dmactrl); 916 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
807 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
808 gfar_write(&priv->regs->dmactrl, tempval);
809 917
810 /* Unmask the interrupts we look for */ 918 gfar_start(dev);
811 gfar_write(&regs->imask, IMASK_DEFAULT);
812 919
813 return 0; 920 return 0;
814 921
@@ -824,7 +931,7 @@ tx_skb_fail:
824 sizeof(struct txbd8)*priv->tx_ring_size 931 sizeof(struct txbd8)*priv->tx_ring_size
825 + sizeof(struct rxbd8)*priv->rx_ring_size, 932 + sizeof(struct rxbd8)*priv->rx_ring_size,
826 priv->tx_bd_base, 933 priv->tx_bd_base,
827 gfar_read(&regs->tbase)); 934 gfar_read(&regs->tbase0));
828 935
829 if (priv->mii_info->phyinfo->close) 936 if (priv->mii_info->phyinfo->close)
830 priv->mii_info->phyinfo->close(priv->mii_info); 937 priv->mii_info->phyinfo->close(priv->mii_info);
@@ -857,11 +964,62 @@ static int gfar_enet_open(struct net_device *dev)
857 return err; 964 return err;
858} 965}
859 966
967static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
968{
969 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
970
971 memset(fcb, 0, GMAC_FCB_LEN);
972
973 /* Flag the bd so the controller looks for the FCB */
974 bdp->status |= TXBD_TOE;
975
976 return fcb;
977}
978
979static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
980{
981 int len;
982
983 /* If we're here, it's a IP packet with a TCP or UDP
984 * payload. We set it to checksum, using a pseudo-header
985 * we provide
986 */
987 fcb->ip = 1;
988 fcb->tup = 1;
989 fcb->ctu = 1;
990 fcb->nph = 1;
991
992 /* Notify the controller what the protocol is */
993 if (skb->nh.iph->protocol == IPPROTO_UDP)
994 fcb->udp = 1;
995
996 /* l3os is the distance between the start of the
997 * frame (skb->data) and the start of the IP hdr.
998 * l4os is the distance between the start of the
999 * l3 hdr and the l4 hdr */
1000 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
1001 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
1002
1003 len = skb->nh.iph->tot_len - fcb->l4os;
1004
1005 /* Provide the pseudoheader csum */
1006 fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1007 skb->nh.iph->daddr, len,
1008 skb->nh.iph->protocol, 0);
1009}
1010
1011void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1012{
1013 fcb->vln = 1;
1014 fcb->vlctl = vlan_tx_tag_get(skb);
1015}
1016
860/* This is called by the kernel when a frame is ready for transmission. */ 1017/* This is called by the kernel when a frame is ready for transmission. */
861/* It is pointed to by the dev->hard_start_xmit function pointer */ 1018/* It is pointed to by the dev->hard_start_xmit function pointer */
862static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1019static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
863{ 1020{
864 struct gfar_private *priv = netdev_priv(dev); 1021 struct gfar_private *priv = netdev_priv(dev);
1022 struct txfcb *fcb = NULL;
865 struct txbd8 *txbdp; 1023 struct txbd8 *txbdp;
866 1024
867 /* Update transmit stats */ 1025 /* Update transmit stats */
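The new gfar_add_fcb()/gfar_tx_checksum() pair above prepends an 8-byte frame control block and records where the L3 and L4 headers sit. Below is a standalone worked example of those offsets for an untagged IPv4/TCP frame; GMAC_FCB_LEN comes from this patch, ETH_HLEN is the standard Ethernet header length, and the 20-byte IP header is an assumption for the example.

/* Worked example (standalone, not driver code) of the FCB offsets. */
#include <stdio.h>

#define GMAC_FCB_LEN 8    /* from gianfar.h in this patch */
#define ETH_HLEN     14   /* standard Ethernet header */

int main(void)
{
	int ip_hdr_len = 20;  /* assumed: IPv4 header with no options */

	/* After skb_push(GMAC_FCB_LEN), skb->data points at the FCB, so:
	 *   l3os = nh.raw - data - GMAC_FCB_LEN  -> just the Ethernet header
	 *   l4os = h.raw  - nh.raw               -> the IP header length   */
	int l3os = (GMAC_FCB_LEN + ETH_HLEN) - GMAC_FCB_LEN;
	int l4os = ip_hdr_len;

	printf("l3os = %d, l4os = %d\n", l3os, l4os);   /* 14 and 20 */

	/* The driver then seeds fcb->phcs with the pseudo-header checksum
	 * (~csum_tcpudp_magic over saddr/daddr/len/protocol), as the comment
	 * in gfar_tx_checksum() says, leaving the payload checksum to the
	 * controller. */
	return 0;
}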
@@ -876,9 +1034,24 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
876 /* Clear all but the WRAP status flags */ 1034 /* Clear all but the WRAP status flags */
877 txbdp->status &= TXBD_WRAP; 1035 txbdp->status &= TXBD_WRAP;
878 1036
1037 /* Set up checksumming */
1038 if ((dev->features & NETIF_F_IP_CSUM)
1039 && (CHECKSUM_HW == skb->ip_summed)) {
1040 fcb = gfar_add_fcb(skb, txbdp);
1041 gfar_tx_checksum(skb, fcb);
1042 }
1043
1044 if (priv->vlan_enable &&
1045 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
1046 if (NULL == fcb)
1047 fcb = gfar_add_fcb(skb, txbdp);
1048
1049 gfar_tx_vlan(skb, fcb);
1050 }
1051
879 /* Set buffer length and pointer */ 1052 /* Set buffer length and pointer */
880 txbdp->length = skb->len; 1053 txbdp->length = skb->len;
881 txbdp->bufPtr = dma_map_single(NULL, skb->data, 1054 txbdp->bufPtr = dma_map_single(NULL, skb->data,
882 skb->len, DMA_TO_DEVICE); 1055 skb->len, DMA_TO_DEVICE);
883 1056
884 /* Save the skb pointer so we can free it later */ 1057 /* Save the skb pointer so we can free it later */
@@ -972,15 +1145,78 @@ int gfar_set_mac_address(struct net_device *dev)
972} 1145}
973 1146
974 1147
1148/* Enables and disables VLAN insertion/extraction */
1149static void gfar_vlan_rx_register(struct net_device *dev,
1150 struct vlan_group *grp)
1151{
1152 struct gfar_private *priv = netdev_priv(dev);
1153 unsigned long flags;
1154 u32 tempval;
1155
1156 spin_lock_irqsave(&priv->lock, flags);
1157
1158 priv->vlgrp = grp;
1159
1160 if (grp) {
1161 /* Enable VLAN tag insertion */
1162 tempval = gfar_read(&priv->regs->tctrl);
1163 tempval |= TCTRL_VLINS;
1164
1165 gfar_write(&priv->regs->tctrl, tempval);
1166
1167 /* Enable VLAN tag extraction */
1168 tempval = gfar_read(&priv->regs->rctrl);
1169 tempval |= RCTRL_VLEX;
1170 gfar_write(&priv->regs->rctrl, tempval);
1171 } else {
1172 /* Disable VLAN tag insertion */
1173 tempval = gfar_read(&priv->regs->tctrl);
1174 tempval &= ~TCTRL_VLINS;
1175 gfar_write(&priv->regs->tctrl, tempval);
1176
1177 /* Disable VLAN tag extraction */
1178 tempval = gfar_read(&priv->regs->rctrl);
1179 tempval &= ~RCTRL_VLEX;
1180 gfar_write(&priv->regs->rctrl, tempval);
1181 }
1182
1183 spin_unlock_irqrestore(&priv->lock, flags);
1184}
1185
1186
1187static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1188{
1189 struct gfar_private *priv = netdev_priv(dev);
1190 unsigned long flags;
1191
1192 spin_lock_irqsave(&priv->lock, flags);
1193
1194 if (priv->vlgrp)
1195 priv->vlgrp->vlan_devices[vid] = NULL;
1196
1197 spin_unlock_irqrestore(&priv->lock, flags);
1198}
1199
1200
975static int gfar_change_mtu(struct net_device *dev, int new_mtu) 1201static int gfar_change_mtu(struct net_device *dev, int new_mtu)
976{ 1202{
977 int tempsize, tempval; 1203 int tempsize, tempval;
978 struct gfar_private *priv = netdev_priv(dev); 1204 struct gfar_private *priv = netdev_priv(dev);
979 int oldsize = priv->rx_buffer_size; 1205 int oldsize = priv->rx_buffer_size;
980 int frame_size = new_mtu + 18; 1206 int frame_size = new_mtu + ETH_HLEN;
1207
1208 if (priv->vlan_enable)
1209 frame_size += VLAN_ETH_HLEN;
1210
1211 if (gfar_uses_fcb(priv))
1212 frame_size += GMAC_FCB_LEN;
1213
1214 frame_size += priv->padding;
981 1215
982 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 1216 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
983 printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name); 1217 if (netif_msg_drv(priv))
1218 printk(KERN_ERR "%s: Invalid MTU setting\n",
1219 dev->name);
984 return -EINVAL; 1220 return -EINVAL;
985 } 1221 }
986 1222
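The rewritten gfar_change_mtu() above now accounts for every header the hardware adds or expects. A standalone worked check of the arithmetic for the default MTU follows; ETH_HLEN and VLAN_ETH_HLEN are the standard kernel values, while GMAC_FCB_LEN, DEFAULT_PADDING and JUMBO_FRAME_SIZE come from gianfar.h in this patch.

/* Worked check of the frame_size computation for MTU 1500 with
 * checksumming, VLAN and the default padding all enabled. */
#include <stdio.h>

#define ETH_HLEN         14
#define VLAN_ETH_HLEN    18
#define GMAC_FCB_LEN     8
#define DEFAULT_PADDING  2
#define JUMBO_FRAME_SIZE 9600

int main(void)
{
	int new_mtu = 1500;
	int frame_size = new_mtu + ETH_HLEN;        /* 1514 */

	frame_size += VLAN_ETH_HLEN;                /* VLAN enabled: 1532 */
	frame_size += GMAC_FCB_LEN;                 /* FCB in use:   1540 */
	frame_size += DEFAULT_PADDING;              /* IP alignment: 1542 */

	printf("frame_size = %d (valid: %s)\n", frame_size,
	       (frame_size >= 64 && frame_size <= JUMBO_FRAME_SIZE) ? "yes" : "no");
	return 0;
}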
@@ -1120,7 +1356,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1120 skb->dev = dev; 1356 skb->dev = dev;
1121 1357
1122 bdp->bufPtr = dma_map_single(NULL, skb->data, 1358 bdp->bufPtr = dma_map_single(NULL, skb->data,
1123 priv->rx_buffer_size + RXBUF_ALIGNMENT, 1359 priv->rx_buffer_size + RXBUF_ALIGNMENT,
1124 DMA_FROM_DEVICE); 1360 DMA_FROM_DEVICE);
1125 1361
1126 bdp->length = 0; 1362 bdp->length = 0;
@@ -1190,11 +1426,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1190 1426
1191 __netif_rx_schedule(dev); 1427 __netif_rx_schedule(dev);
1192 } else { 1428 } else {
1193#ifdef VERBOSE_GFAR_ERRORS 1429 if (netif_msg_rx_err(priv))
1194 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", 1430 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1195 dev->name, gfar_read(&priv->regs->ievent), 1431 dev->name, gfar_read(&priv->regs->ievent),
1196 gfar_read(&priv->regs->imask)); 1432 gfar_read(&priv->regs->imask));
1197#endif
1198 } 1433 }
1199#else 1434#else
1200 1435
@@ -1209,15 +1444,43 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1209 else 1444 else
1210 gfar_write(&priv->regs->rxic, 0); 1445 gfar_write(&priv->regs->rxic, 0);
1211 1446
1212 /* Just in case we need to wake the ring param changer */
1213 priv->rxclean = 1;
1214
1215 spin_unlock(&priv->lock); 1447 spin_unlock(&priv->lock);
1216#endif 1448#endif
1217 1449
1218 return IRQ_HANDLED; 1450 return IRQ_HANDLED;
1219} 1451}
1220 1452
1453static inline int gfar_rx_vlan(struct sk_buff *skb,
1454 struct vlan_group *vlgrp, unsigned short vlctl)
1455{
1456#ifdef CONFIG_GFAR_NAPI
1457 return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
1458#else
1459 return vlan_hwaccel_rx(skb, vlgrp, vlctl);
1460#endif
1461}
1462
1463static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1464{
1465 /* If valid headers were found, and valid sums
1466 * were verified, then we tell the kernel that no
1467 * checksumming is necessary. Otherwise, it is */
1468 if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu)
1469 skb->ip_summed = CHECKSUM_UNNECESSARY;
1470 else
1471 skb->ip_summed = CHECKSUM_NONE;
1472}
1473
1474
1475static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1476{
1477 struct rxfcb *fcb = (struct rxfcb *)skb->data;
1478
1479 /* Remove the FCB from the skb */
1480 skb_pull(skb, GMAC_FCB_LEN);
1481
1482 return fcb;
1483}
1221 1484
1222/* gfar_process_frame() -- handle one incoming packet if skb 1485/* gfar_process_frame() -- handle one incoming packet if skb
1223 * isn't NULL. */ 1486 * isn't NULL. */
@@ -1225,35 +1488,51 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1225 int length) 1488 int length)
1226{ 1489{
1227 struct gfar_private *priv = netdev_priv(dev); 1490 struct gfar_private *priv = netdev_priv(dev);
1491 struct rxfcb *fcb = NULL;
1228 1492
1229 if (skb == NULL) { 1493 if (skb == NULL) {
1230#ifdef BRIEF_GFAR_ERRORS 1494 if (netif_msg_rx_err(priv))
1231 printk(KERN_WARNING "%s: Missing skb!!.\n", 1495 printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
1232 dev->name);
1233#endif
1234 priv->stats.rx_dropped++; 1496 priv->stats.rx_dropped++;
1235 priv->extra_stats.rx_skbmissing++; 1497 priv->extra_stats.rx_skbmissing++;
1236 } else { 1498 } else {
1499 int ret;
1500
1237 /* Prep the skb for the packet */ 1501 /* Prep the skb for the packet */
1238 skb_put(skb, length); 1502 skb_put(skb, length);
1239 1503
1504 /* Grab the FCB if there is one */
1505 if (gfar_uses_fcb(priv))
1506 fcb = gfar_get_fcb(skb);
1507
1508 /* Remove the padded bytes, if there are any */
1509 if (priv->padding)
1510 skb_pull(skb, priv->padding);
1511
1512 if (priv->rx_csum_enable)
1513 gfar_rx_checksum(skb, fcb);
1514
1240 /* Tell the skb what kind of packet this is */ 1515 /* Tell the skb what kind of packet this is */
1241 skb->protocol = eth_type_trans(skb, dev); 1516 skb->protocol = eth_type_trans(skb, dev);
1242 1517
1243 /* Send the packet up the stack */ 1518 /* Send the packet up the stack */
1244 if (RECEIVE(skb) == NET_RX_DROP) { 1519 if (unlikely(priv->vlgrp && fcb->vln))
1520 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1521 else
1522 ret = RECEIVE(skb);
1523
1524 if (NET_RX_DROP == ret)
1245 priv->extra_stats.kernel_dropped++; 1525 priv->extra_stats.kernel_dropped++;
1246 }
1247 } 1526 }
1248 1527
1249 return 0; 1528 return 0;
1250} 1529}
1251 1530
1252/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 1531/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1253 * until the budget/quota has been reached. Returns the number 1532 * until the budget/quota has been reached. Returns the number
1254 * of frames handled 1533 * of frames handled
1255 */ 1534 */
1256static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 1535int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1257{ 1536{
1258 struct rxbd8 *bdp; 1537 struct rxbd8 *bdp;
1259 struct sk_buff *skb; 1538 struct sk_buff *skb;
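gfar_process_frame() above peels the receive buffer apart in a fixed order before handing it to the stack. The sketch below is a standalone model of that order and of gfar_rx_checksum()'s verdict; rx_csum_verdict() is a hypothetical name used only for the example.

/* Standalone model (not driver code) of the receive path this patch adds.
 * Buffer layout on entry to gfar_process_frame(), with the default 2-byte
 * padding and an 8-byte FCB:
 *
 *   [ FCB: 8 bytes ][ pad: 2 ][ Ethernet header ][ IP ... ]
 *
 * gfar_get_fcb() pulls the FCB, the padding is pulled next, and only then
 * is the frame handed to eth_type_trans() and sent up the stack. */
#include <stdio.h>
#include <stdbool.h>

static const char *rx_csum_verdict(bool cip, bool eip, bool ctu, bool etu)
{
	/* The checksum is trusted only when the parser checksummed both the
	 * IP and TCP/UDP headers (cip, ctu) and found no errors (eip, etu). */
	return (cip && !eip && ctu && !etu) ? "CHECKSUM_UNNECESSARY"
					    : "CHECKSUM_NONE";
}

int main(void)
{
	printf("clean frame: %s\n", rx_csum_verdict(true, false, true, false));
	printf("bad L4 csum: %s\n", rx_csum_verdict(true, false, true, true));
	return 0;
}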
@@ -1355,9 +1634,6 @@ static int gfar_poll(struct net_device *dev, int *budget)
1355 mk_ic_value(priv->rxcount, priv->rxtime)); 1634 mk_ic_value(priv->rxcount, priv->rxtime));
1356 else 1635 else
1357 gfar_write(&priv->regs->rxic, 0); 1636 gfar_write(&priv->regs->rxic, 0);
1358
1359 /* Signal to the ring size changer that it's safe to go */
1360 priv->rxclean = 1;
1361 } 1637 }
1362 1638
1363 return (rx_work_limit < 0) ? 1 : 0; 1639 return (rx_work_limit < 0) ? 1 : 0;
@@ -1393,10 +1669,8 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1393 if (events & IEVENT_CRL) 1669 if (events & IEVENT_CRL)
1394 priv->stats.tx_aborted_errors++; 1670 priv->stats.tx_aborted_errors++;
1395 if (events & IEVENT_XFUN) { 1671 if (events & IEVENT_XFUN) {
1396#ifdef VERBOSE_GFAR_ERRORS 1672 if (netif_msg_tx_err(priv))
1397 printk(KERN_WARNING "%s: tx underrun. dropped packet\n", 1673 printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
1398 dev->name);
1399#endif
1400 priv->stats.tx_dropped++; 1674 priv->stats.tx_dropped++;
1401 priv->extra_stats.tx_underrun++; 1675 priv->extra_stats.tx_underrun++;
1402 1676
@@ -1415,36 +1689,30 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1415 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1689 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1416#endif 1690#endif
1417 1691
1418#ifdef VERBOSE_GFAR_ERRORS 1692 if (netif_msg_rx_err(priv))
1419 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, 1693 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1420 gfar_read(&priv->regs->rstat)); 1694 dev->name,
1421#endif 1695 gfar_read(&priv->regs->rstat));
1422 } 1696 }
1423 if (events & IEVENT_BABR) { 1697 if (events & IEVENT_BABR) {
1424 priv->stats.rx_errors++; 1698 priv->stats.rx_errors++;
1425 priv->extra_stats.rx_babr++; 1699 priv->extra_stats.rx_babr++;
1426 1700
1427#ifdef VERBOSE_GFAR_ERRORS 1701 if (netif_msg_rx_err(priv))
1428 printk(KERN_DEBUG "%s: babbling error\n", dev->name); 1702 printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1429#endif
1430 } 1703 }
1431 if (events & IEVENT_EBERR) { 1704 if (events & IEVENT_EBERR) {
1432 priv->extra_stats.eberr++; 1705 priv->extra_stats.eberr++;
1433#ifdef VERBOSE_GFAR_ERRORS 1706 if (netif_msg_rx_err(priv))
1434 printk(KERN_DEBUG "%s: EBERR\n", dev->name); 1707 printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1435#endif
1436 }
1437 if (events & IEVENT_RXC) {
1438#ifdef VERBOSE_GFAR_ERRORS
1439 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1440#endif
1441 } 1708 }
1709 if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
1710 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1442 1711
1443 if (events & IEVENT_BABT) { 1712 if (events & IEVENT_BABT) {
1444 priv->extra_stats.tx_babt++; 1713 priv->extra_stats.tx_babt++;
1445#ifdef VERBOSE_GFAR_ERRORS 1714 if (netif_msg_rx_err(priv))
1446 printk(KERN_DEBUG "%s: babt error\n", dev->name); 1715 printk(KERN_DEBUG "%s: babt error\n", dev->name);
1447#endif
1448 } 1716 }
1449 1717
1450 return IRQ_HANDLED; 1718 return IRQ_HANDLED;
@@ -1510,7 +1778,7 @@ static void gfar_phy_timer(unsigned long data)
1510 * If, after GFAR_AN_TIMEOUT seconds, it has not 1778 * If, after GFAR_AN_TIMEOUT seconds, it has not
1511 * finished, we switch to forced. 1779 * finished, we switch to forced.
1512 * Either way, once the process has completed, we either 1780 * Either way, once the process has completed, we either
1513 * request the interrupt, or switch the timer over to 1781 * request the interrupt, or switch the timer over to
1514 * using gfar_phy_timer to check status */ 1782 * using gfar_phy_timer to check status */
1515static void gfar_phy_startup_timer(unsigned long data) 1783static void gfar_phy_startup_timer(unsigned long data)
1516{ 1784{
@@ -1535,8 +1803,9 @@ static void gfar_phy_startup_timer(unsigned long data)
1535 1803
1536 /* Forcing failed! Give up */ 1804 /* Forcing failed! Give up */
1537 if(result) { 1805 if(result) {
1538 printk(KERN_ERR "%s: Forcing failed!\n", 1806 if (netif_msg_link(priv))
1539 mii_info->dev->name); 1807 printk(KERN_ERR "%s: Forcing failed!\n",
1808 mii_info->dev->name);
1540 return; 1809 return;
1541 } 1810 }
1542 } 1811 }
@@ -1546,16 +1815,17 @@ static void gfar_phy_startup_timer(unsigned long data)
1546 1815
1547 /* Grab the PHY interrupt, if necessary/possible */ 1816 /* Grab the PHY interrupt, if necessary/possible */
1548 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) { 1817 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
1549 if (request_irq(priv->einfo->interruptPHY, 1818 if (request_irq(priv->einfo->interruptPHY,
1550 phy_interrupt, 1819 phy_interrupt,
1551 SA_SHIRQ, 1820 SA_SHIRQ,
1552 "phy_interrupt", 1821 "phy_interrupt",
1553 mii_info->dev) < 0) { 1822 mii_info->dev) < 0) {
1554 printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n", 1823 if (netif_msg_intr(priv))
1555 mii_info->dev->name, 1824 printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
1825 mii_info->dev->name,
1556 priv->einfo->interruptPHY); 1826 priv->einfo->interruptPHY);
1557 } else { 1827 } else {
1558 mii_configure_phy_interrupt(priv->mii_info, 1828 mii_configure_phy_interrupt(priv->mii_info,
1559 MII_INTERRUPT_ENABLED); 1829 MII_INTERRUPT_ENABLED);
1560 return; 1830 return;
1561 } 1831 }
@@ -1592,15 +1862,17 @@ static void adjust_link(struct net_device *dev)
1592 tempval &= ~(MACCFG2_FULL_DUPLEX); 1862 tempval &= ~(MACCFG2_FULL_DUPLEX);
1593 gfar_write(&regs->maccfg2, tempval); 1863 gfar_write(&regs->maccfg2, tempval);
1594 1864
1595 printk(KERN_INFO "%s: Half Duplex\n", 1865 if (netif_msg_link(priv))
1596 dev->name); 1866 printk(KERN_INFO "%s: Half Duplex\n",
1867 dev->name);
1597 } else { 1868 } else {
1598 tempval = gfar_read(&regs->maccfg2); 1869 tempval = gfar_read(&regs->maccfg2);
1599 tempval |= MACCFG2_FULL_DUPLEX; 1870 tempval |= MACCFG2_FULL_DUPLEX;
1600 gfar_write(&regs->maccfg2, tempval); 1871 gfar_write(&regs->maccfg2, tempval);
1601 1872
1602 printk(KERN_INFO "%s: Full Duplex\n", 1873 if (netif_msg_link(priv))
1603 dev->name); 1874 printk(KERN_INFO "%s: Full Duplex\n",
1875 dev->name);
1604 } 1876 }
1605 1877
1606 priv->oldduplex = mii_info->duplex; 1878 priv->oldduplex = mii_info->duplex;
@@ -1622,27 +1894,32 @@ static void adjust_link(struct net_device *dev)
1622 gfar_write(&regs->maccfg2, tempval); 1894 gfar_write(&regs->maccfg2, tempval);
1623 break; 1895 break;
1624 default: 1896 default:
1625 printk(KERN_WARNING 1897 if (netif_msg_link(priv))
1626 "%s: Ack! Speed (%d) is not 10/100/1000!\n", 1898 printk(KERN_WARNING
1627 dev->name, mii_info->speed); 1899 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
1900 dev->name, mii_info->speed);
1628 break; 1901 break;
1629 } 1902 }
1630 1903
1631 printk(KERN_INFO "%s: Speed %dBT\n", dev->name, 1904 if (netif_msg_link(priv))
1632 mii_info->speed); 1905 printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
1906 mii_info->speed);
1633 1907
1634 priv->oldspeed = mii_info->speed; 1908 priv->oldspeed = mii_info->speed;
1635 } 1909 }
1636 1910
1637 if (!priv->oldlink) { 1911 if (!priv->oldlink) {
1638 printk(KERN_INFO "%s: Link is up\n", dev->name); 1912 if (netif_msg_link(priv))
1913 printk(KERN_INFO "%s: Link is up\n", dev->name);
1639 priv->oldlink = 1; 1914 priv->oldlink = 1;
1640 netif_carrier_on(dev); 1915 netif_carrier_on(dev);
1641 netif_schedule(dev); 1916 netif_schedule(dev);
1642 } 1917 }
1643 } else { 1918 } else {
1644 if (priv->oldlink) { 1919 if (priv->oldlink) {
1645 printk(KERN_INFO "%s: Link is down\n", dev->name); 1920 if (netif_msg_link(priv))
1921 printk(KERN_INFO "%s: Link is down\n",
1922 dev->name);
1646 priv->oldlink = 0; 1923 priv->oldlink = 0;
1647 priv->oldspeed = 0; 1924 priv->oldspeed = 0;
1648 priv->oldduplex = -1; 1925 priv->oldduplex = -1;
@@ -1664,8 +1941,9 @@ static void gfar_set_multi(struct net_device *dev)
1664 u32 tempval; 1941 u32 tempval;
1665 1942
1666 if(dev->flags & IFF_PROMISC) { 1943 if(dev->flags & IFF_PROMISC) {
1667 printk(KERN_INFO "%s: Entering promiscuous mode.\n", 1944 if (netif_msg_drv(priv))
1668 dev->name); 1945 printk(KERN_INFO "%s: Entering promiscuous mode.\n",
1946 dev->name);
1669 /* Set RCTRL to PROM */ 1947 /* Set RCTRL to PROM */
1670 tempval = gfar_read(&regs->rctrl); 1948 tempval = gfar_read(&regs->rctrl);
1671 tempval |= RCTRL_PROM; 1949 tempval |= RCTRL_PROM;
@@ -1679,6 +1957,14 @@ static void gfar_set_multi(struct net_device *dev)
1679 1957
1680 if(dev->flags & IFF_ALLMULTI) { 1958 if(dev->flags & IFF_ALLMULTI) {
1681 /* Set the hash to rx all multicast frames */ 1959 /* Set the hash to rx all multicast frames */
1960 gfar_write(&regs->igaddr0, 0xffffffff);
1961 gfar_write(&regs->igaddr1, 0xffffffff);
1962 gfar_write(&regs->igaddr2, 0xffffffff);
1963 gfar_write(&regs->igaddr3, 0xffffffff);
1964 gfar_write(&regs->igaddr4, 0xffffffff);
1965 gfar_write(&regs->igaddr5, 0xffffffff);
1966 gfar_write(&regs->igaddr6, 0xffffffff);
1967 gfar_write(&regs->igaddr7, 0xffffffff);
1682 gfar_write(&regs->gaddr0, 0xffffffff); 1968 gfar_write(&regs->gaddr0, 0xffffffff);
1683 gfar_write(&regs->gaddr1, 0xffffffff); 1969 gfar_write(&regs->gaddr1, 0xffffffff);
1684 gfar_write(&regs->gaddr2, 0xffffffff); 1970 gfar_write(&regs->gaddr2, 0xffffffff);
@@ -1689,6 +1975,14 @@ static void gfar_set_multi(struct net_device *dev)
1689 gfar_write(&regs->gaddr7, 0xffffffff); 1975 gfar_write(&regs->gaddr7, 0xffffffff);
1690 } else { 1976 } else {
1691 /* zero out the hash */ 1977 /* zero out the hash */
1978 gfar_write(&regs->igaddr0, 0x0);
1979 gfar_write(&regs->igaddr1, 0x0);
1980 gfar_write(&regs->igaddr2, 0x0);
1981 gfar_write(&regs->igaddr3, 0x0);
1982 gfar_write(&regs->igaddr4, 0x0);
1983 gfar_write(&regs->igaddr5, 0x0);
1984 gfar_write(&regs->igaddr6, 0x0);
1985 gfar_write(&regs->igaddr7, 0x0);
1692 gfar_write(&regs->gaddr0, 0x0); 1986 gfar_write(&regs->gaddr0, 0x0);
1693 gfar_write(&regs->gaddr1, 0x0); 1987 gfar_write(&regs->gaddr1, 0x0);
1694 gfar_write(&regs->gaddr2, 0x0); 1988 gfar_write(&regs->gaddr2, 0x0);
@@ -1727,16 +2021,15 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1727{ 2021{
1728 u32 tempval; 2022 u32 tempval;
1729 struct gfar_private *priv = netdev_priv(dev); 2023 struct gfar_private *priv = netdev_priv(dev);
1730 struct gfar *regs = priv->regs;
1731 u32 *hash = &regs->gaddr0;
1732 u32 result = ether_crc(MAC_ADDR_LEN, addr); 2024 u32 result = ether_crc(MAC_ADDR_LEN, addr);
1733 u8 whichreg = ((result >> 29) & 0x7); 2025 int width = priv->hash_width;
1734 u8 whichbit = ((result >> 24) & 0x1f); 2026 u8 whichbit = (result >> (32 - width)) & 0x1f;
2027 u8 whichreg = result >> (32 - width + 5);
1735 u32 value = (1 << (31-whichbit)); 2028 u32 value = (1 << (31-whichbit));
1736 2029
1737 tempval = gfar_read(&hash[whichreg]); 2030 tempval = gfar_read(priv->hash_regs[whichreg]);
1738 tempval |= value; 2031 tempval |= value;
1739 gfar_write(&hash[whichreg], tempval); 2032 gfar_write(priv->hash_regs[whichreg], tempval);
1740 2033
1741 return; 2034 return;
1742} 2035}
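The rewritten gfar_set_hash_for_addr() above slices the Ethernet CRC differently depending on hash_width: 8 bits select one of 8 gaddr registers, 9 bits select one of the 16 igaddr/gaddr registers. The standalone model below mirrors that index math; pick() is a hypothetical helper and the sample CRC value is arbitrary.

/* Standalone model of the register/bit selection in gfar_set_hash_for_addr(). */
#include <stdio.h>
#include <stdint.h>

static void pick(uint32_t result, int width)
{
	uint8_t whichbit = (result >> (32 - width)) & 0x1f;
	uint8_t whichreg = result >> (32 - width + 5);
	uint32_t value = 1u << (31 - whichbit);

	printf("width %d: reg %u, bit %u, value 0x%08x\n",
	       width, (unsigned int)whichreg, (unsigned int)whichbit,
	       (unsigned int)value);
}

int main(void)
{
	uint32_t result = 0xd5a3c7e2;	/* arbitrary stand-in for ether_crc() */

	pick(result, 8);	/* 256-bit hash:  8 registers (gaddr0..7)   */
	pick(result, 9);	/* 512-bit hash: 16 registers (igaddr+gaddr) */
	return 0;
}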
@@ -1754,10 +2047,9 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1754 gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK); 2047 gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1755 2048
1756 /* Hmm... */ 2049 /* Hmm... */
1757#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS) 2050 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
1758 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2051 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1759 dev->name, events, gfar_read(&priv->regs->imask)); 2052 dev->name, events, gfar_read(&priv->regs->imask));
1760#endif
1761 2053
1762 /* Update the error counters */ 2054 /* Update the error counters */
1763 if (events & IEVENT_TXE) { 2055 if (events & IEVENT_TXE) {
@@ -1768,19 +2060,17 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1768 if (events & IEVENT_CRL) 2060 if (events & IEVENT_CRL)
1769 priv->stats.tx_aborted_errors++; 2061 priv->stats.tx_aborted_errors++;
1770 if (events & IEVENT_XFUN) { 2062 if (events & IEVENT_XFUN) {
1771#ifdef VERBOSE_GFAR_ERRORS 2063 if (netif_msg_tx_err(priv))
1772 printk(KERN_DEBUG "%s: underrun. packet dropped.\n", 2064 printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
1773 dev->name); 2065 dev->name);
1774#endif
1775 priv->stats.tx_dropped++; 2066 priv->stats.tx_dropped++;
1776 priv->extra_stats.tx_underrun++; 2067 priv->extra_stats.tx_underrun++;
1777 2068
1778 /* Reactivate the Tx Queues */ 2069 /* Reactivate the Tx Queues */
1779 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2070 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1780 } 2071 }
1781#ifdef VERBOSE_GFAR_ERRORS 2072 if (netif_msg_tx_err(priv))
1782 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2073 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1783#endif
1784 } 2074 }
1785 if (events & IEVENT_BSY) { 2075 if (events & IEVENT_BSY) {
1786 priv->stats.rx_errors++; 2076 priv->stats.rx_errors++;
@@ -1793,35 +2083,31 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1793 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2083 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1794#endif 2084#endif
1795 2085
1796#ifdef VERBOSE_GFAR_ERRORS 2086 if (netif_msg_rx_err(priv))
1797 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, 2087 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1798 gfar_read(&priv->regs->rstat)); 2088 dev->name,
1799#endif 2089 gfar_read(&priv->regs->rstat));
1800 } 2090 }
1801 if (events & IEVENT_BABR) { 2091 if (events & IEVENT_BABR) {
1802 priv->stats.rx_errors++; 2092 priv->stats.rx_errors++;
1803 priv->extra_stats.rx_babr++; 2093 priv->extra_stats.rx_babr++;
1804 2094
1805#ifdef VERBOSE_GFAR_ERRORS 2095 if (netif_msg_rx_err(priv))
1806 printk(KERN_DEBUG "%s: babbling error\n", dev->name); 2096 printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1807#endif
1808 } 2097 }
1809 if (events & IEVENT_EBERR) { 2098 if (events & IEVENT_EBERR) {
1810 priv->extra_stats.eberr++; 2099 priv->extra_stats.eberr++;
1811#ifdef VERBOSE_GFAR_ERRORS 2100 if (netif_msg_rx_err(priv))
1812 printk(KERN_DEBUG "%s: EBERR\n", dev->name); 2101 printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1813#endif
1814 } 2102 }
1815 if (events & IEVENT_RXC) 2103 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
1816#ifdef VERBOSE_GFAR_ERRORS 2104 if (netif_msg_rx_status(priv))
1817 printk(KERN_DEBUG "%s: control frame\n", dev->name); 2105 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1818#endif
1819 2106
1820 if (events & IEVENT_BABT) { 2107 if (events & IEVENT_BABT) {
1821 priv->extra_stats.tx_babt++; 2108 priv->extra_stats.tx_babt++;
1822#ifdef VERBOSE_GFAR_ERRORS 2109 if (netif_msg_tx_err(priv))
1823 printk(KERN_DEBUG "%s: babt error\n", dev->name); 2110 printk(KERN_DEBUG "%s: babt error\n", dev->name);
1824#endif
1825 } 2111 }
1826 return IRQ_HANDLED; 2112 return IRQ_HANDLED;
1827} 2113}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index c2f783a6a9fa..28af087d9fbb 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * drivers/net/gianfar.h 2 * drivers/net/gianfar.h
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
@@ -53,6 +53,12 @@
53/* The maximum number of packets to be handled in one call of gfar_poll */ 53/* The maximum number of packets to be handled in one call of gfar_poll */
54#define GFAR_DEV_WEIGHT 64 54#define GFAR_DEV_WEIGHT 64
55 55
56/* Length for FCB */
57#define GMAC_FCB_LEN 8
58
59/* Default padding amount */
60#define DEFAULT_PADDING 2
61
56/* Number of bytes to align the rx bufs to */ 62/* Number of bytes to align the rx bufs to */
57#define RXBUF_ALIGNMENT 64 63#define RXBUF_ALIGNMENT 64
58 64
@@ -91,7 +97,7 @@ extern const char gfar_driver_version[];
91#define JUMBO_FRAME_SIZE 9600 97#define JUMBO_FRAME_SIZE 9600
92 98
93/* Latency of interface clock in nanoseconds */ 99/* Latency of interface clock in nanoseconds */
94/* Interface clock latency , in this case, means the 100/* Interface clock latency , in this case, means the
95 * time described by a value of 1 in the interrupt 101 * time described by a value of 1 in the interrupt
96 * coalescing registers' time fields. Since those fields 102 * coalescing registers' time fields. Since those fields
97 * refer to the time it takes for 64 clocks to pass, the 103 * refer to the time it takes for 64 clocks to pass, the
@@ -166,9 +172,28 @@ extern const char gfar_driver_version[];
166 mk_ic_icft(count) | \ 172 mk_ic_icft(count) | \
167 mk_ic_ictt(time)) 173 mk_ic_ictt(time))
168 174
175#define RCTRL_PAL_MASK 0x001f0000
176#define RCTRL_VLEX 0x00002000
177#define RCTRL_FILREN 0x00001000
178#define RCTRL_GHTX 0x00000400
179#define RCTRL_IPCSEN 0x00000200
180#define RCTRL_TUCSEN 0x00000100
181#define RCTRL_PRSDEP_MASK 0x000000c0
182#define RCTRL_PRSDEP_INIT 0x000000c0
169#define RCTRL_PROM 0x00000008 183#define RCTRL_PROM 0x00000008
184#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \
185 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT)
186#define RCTRL_EXTHASH (RCTRL_GHTX)
187#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
188
189
170#define RSTAT_CLEAR_RHALT 0x00800000 190#define RSTAT_CLEAR_RHALT 0x00800000
171 191
192#define TCTRL_IPCSEN 0x00004000
193#define TCTRL_TUCSEN 0x00002000
194#define TCTRL_VLINS 0x00001000
195#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
196
172#define IEVENT_INIT_CLEAR 0xffffffff 197#define IEVENT_INIT_CLEAR 0xffffffff
173#define IEVENT_BABR 0x80000000 198#define IEVENT_BABR 0x80000000
174#define IEVENT_RXC 0x40000000 199#define IEVENT_RXC 0x40000000
@@ -187,12 +212,16 @@ extern const char gfar_driver_version[];
187#define IEVENT_RXB0 0x00008000 212#define IEVENT_RXB0 0x00008000
188#define IEVENT_GRSC 0x00000100 213#define IEVENT_GRSC 0x00000100
189#define IEVENT_RXF0 0x00000080 214#define IEVENT_RXF0 0x00000080
215#define IEVENT_FIR 0x00000008
216#define IEVENT_FIQ 0x00000004
217#define IEVENT_DPE 0x00000002
218#define IEVENT_PERR 0x00000001
190#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) 219#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
191#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) 220#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
192#define IEVENT_ERR_MASK \ 221#define IEVENT_ERR_MASK \
193(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 222(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
194 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ 223 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
195 | IEVENT_CRL | IEVENT_XFUN) 224 | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR)
196 225
197#define IMASK_INIT_CLEAR 0x00000000 226#define IMASK_INIT_CLEAR 0x00000000
198#define IMASK_BABR 0x80000000 227#define IMASK_BABR 0x80000000
@@ -212,10 +241,15 @@ extern const char gfar_driver_version[];
212#define IMASK_RXB0 0x00008000 241#define IMASK_RXB0 0x00008000
213#define IMASK_GTSC 0x00000100 242#define IMASK_GTSC 0x00000100
214#define IMASK_RXFEN0 0x00000080 243#define IMASK_RXFEN0 0x00000080
244#define IMASK_FIR 0x00000008
245#define IMASK_FIQ 0x00000004
246#define IMASK_DPE 0x00000002
247#define IMASK_PERR 0x00000001
215#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY) 248#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
216#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ 249#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
217 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 250 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
218 IMASK_XFUN | IMASK_RXC | IMASK_BABT) 251 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
252 | IMASK_PERR)
219 253
220 254
221/* Attribute fields */ 255/* Attribute fields */
@@ -254,6 +288,18 @@ extern const char gfar_driver_version[];
254#define TXBD_RETRYLIMIT 0x0040 288#define TXBD_RETRYLIMIT 0x0040
255#define TXBD_RETRYCOUNTMASK 0x003c 289#define TXBD_RETRYCOUNTMASK 0x003c
256#define TXBD_UNDERRUN 0x0002 290#define TXBD_UNDERRUN 0x0002
291#define TXBD_TOE 0x0002
292
293/* Tx FCB param bits */
294#define TXFCB_VLN 0x80
295#define TXFCB_IP 0x40
296#define TXFCB_IP6 0x20
297#define TXFCB_TUP 0x10
298#define TXFCB_UDP 0x08
299#define TXFCB_CIP 0x04
300#define TXFCB_CTU 0x02
301#define TXFCB_NPH 0x01
302#define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH)
257 303
258/* RxBD status field bits */ 304/* RxBD status field bits */
259#define RXBD_EMPTY 0x8000 305#define RXBD_EMPTY 0x8000
@@ -273,6 +319,18 @@ extern const char gfar_driver_version[];
273#define RXBD_TRUNCATED 0x0001 319#define RXBD_TRUNCATED 0x0001
274#define RXBD_STATS 0x01ff 320#define RXBD_STATS 0x01ff
275 321
322/* Rx FCB status field bits */
323#define RXFCB_VLN 0x8000
324#define RXFCB_IP 0x4000
325#define RXFCB_IP6 0x2000
326#define RXFCB_TUP 0x1000
327#define RXFCB_CIP 0x0800
328#define RXFCB_CTU 0x0400
329#define RXFCB_EIP 0x0200
330#define RXFCB_ETU 0x0100
331#define RXFCB_PERR_MASK 0x000c
332#define RXFCB_PERR_BADL3 0x0008
333
276struct txbd8 334struct txbd8
277{ 335{
278 u16 status; /* Status Fields */ 336 u16 status; /* Status Fields */
@@ -280,6 +338,22 @@ struct txbd8
280 u32 bufPtr; /* Buffer Pointer */ 338 u32 bufPtr; /* Buffer Pointer */
281}; 339};
282 340
341struct txfcb {
342 u8 vln:1,
343 ip:1,
344 ip6:1,
345 tup:1,
346 udp:1,
347 cip:1,
348 ctu:1,
349 nph:1;
350 u8 reserved;
351 u8 l4os; /* Level 4 Header Offset */
352 u8 l3os; /* Level 3 Header Offset */
353 u16 phcs; /* Pseudo-header Checksum */
354 u16 vlctl; /* VLAN control word */
355};
356
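The Tx frame control block (FCB) is prepended to each frame when TOE features are in use. A hedged sketch of how one could be filled for a TCP-over-IPv4 frame follows; the parameter names are assumptions, not the driver's transmit path:

/* Hedged sketch, not the driver's xmit code: l3off/l4off are byte
 * offsets of the IP and TCP headers from the start of the frame,
 * phcs is the pseudo-header checksum the hardware completes. */
static void gfar_fill_txfcb_sketch(struct txfcb *fcb, u8 l3off, u8 l4off,
				   u16 phcs)
{
	memset(fcb, 0, sizeof(*fcb));

	/* same bits as TXFCB_DEFAULT: IP, TCP/UDP, checksum it, NPH */
	fcb->ip = 1;
	fcb->tup = 1;
	fcb->ctu = 1;
	fcb->nph = 1;

	fcb->l3os = l3off;
	fcb->l4os = l4off;
	fcb->phcs = phcs;
}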
283struct rxbd8 357struct rxbd8
284{ 358{
285 u16 status; /* Status Fields */ 359 u16 status; /* Status Fields */
@@ -287,6 +361,21 @@ struct rxbd8
287 u32 bufPtr; /* Buffer Pointer */ 361 u32 bufPtr; /* Buffer Pointer */
288}; 362};
289 363
364struct rxfcb {
365 u16 vln:1,
366 ip:1,
367 ip6:1,
368 tup:1,
369 cip:1,
370 ctu:1,
371 eip:1,
372 etu:1;
373 u8 rq; /* Receive Queue index */
374 u8 pro; /* Layer 4 Protocol */
375 u16 reserved;
376 u16 vlctl; /* VLAN control word */
377};
378
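On receive, the FCB reports whether the hardware checked the L3/L4 checksums (cip/ctu) and whether an error was found (eip/etu). A hedged interpretation sketch, not code from the patch:

/* Hedged sketch: return non-zero if at least one checksum was
 * verified by hardware and no checked checksum failed. */
static int gfar_rxfcb_csum_ok_sketch(struct rxfcb *fcb)
{
	if ((fcb->cip && fcb->eip) || (fcb->ctu && fcb->etu))
		return 0;	/* a checked checksum failed */

	return fcb->cip || fcb->ctu;	/* something was verified */
}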
290struct rmon_mib 379struct rmon_mib
291{ 380{
292 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ 381 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
@@ -371,90 +460,191 @@ struct gfar_stats {
371 460
372 461
373struct gfar { 462struct gfar {
374 u8 res1[16]; 463 u32 tsec_id; /* 0x.000 - Controller ID register */
375 u32 ievent; /* 0x.010 - Interrupt Event Register */ 464 u8 res1[12];
376 u32 imask; /* 0x.014 - Interrupt Mask Register */ 465 u32 ievent; /* 0x.010 - Interrupt Event Register */
377 u32 edis; /* 0x.018 - Error Disabled Register */ 466 u32 imask; /* 0x.014 - Interrupt Mask Register */
467 u32 edis; /* 0x.018 - Error Disabled Register */
378 u8 res2[4]; 468 u8 res2[4];
379 u32 ecntrl; /* 0x.020 - Ethernet Control Register */ 469 u32 ecntrl; /* 0x.020 - Ethernet Control Register */
380 u32 minflr; /* 0x.024 - Minimum Frame Length Register */ 470 u32 minflr; /* 0x.024 - Minimum Frame Length Register */
381 u32 ptv; /* 0x.028 - Pause Time Value Register */ 471 u32 ptv; /* 0x.028 - Pause Time Value Register */
382 u32 dmactrl; /* 0x.02c - DMA Control Register */ 472 u32 dmactrl; /* 0x.02c - DMA Control Register */
383 u32 tbipa; /* 0x.030 - TBI PHY Address Register */ 473 u32 tbipa; /* 0x.030 - TBI PHY Address Register */
384 u8 res3[88]; 474 u8 res3[88];
385 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ 475 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
386 u8 res4[8]; 476 u8 res4[8];
387 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ 477 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
388 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ 478 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
389 u8 res5[96]; 479 u8 res5[4];
390 u32 tctrl; /* 0x.100 - Transmit Control Register */ 480 u32 fifo_rx_pause; /* 0x.0a4 - FIFO receive pause threshold register */
391 u32 tstat; /* 0x.104 - Transmit Status Register */ 481 u32 fifo_rx_alarm; /* 0x.0a8 - FIFO receive alarm threshold register */
392 u8 res6[4]; 482 u8 res6[84];
393 u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */ 483 u32 tctrl; /* 0x.100 - Transmit Control Register */
394 u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */ 484 u32 tstat; /* 0x.104 - Transmit Status Register */
395 u8 res7[16]; 485 u32 dfvlan; /* 0x.108 - Default VLAN Control word */
396 u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */ 486 u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
397 u8 res8[92]; 487 u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
398 u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */ 488 u32 tqueue; /* 0x.114 - Transmit queue control register */
399 u8 res9[124]; 489 u8 res7[40];
400 u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */ 490 u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */
401 u8 res10[168]; 491 u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */
402 u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */ 492 u8 res8[52];
403 u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */ 493 u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */
404 u8 res11[72]; 494 u8 res9a[4];
405 u32 rctrl; /* 0x.300 - Receive Control Register */ 495 u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */
406 u32 rstat; /* 0x.304 - Receive Status Register */ 496 u8 res9b[4];
407 u8 res12[4]; 497 u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */
408 u32 rbdlen; /* 0x.30c - RxBD Data Length Register */ 498 u8 res9c[4];
409 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ 499 u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */
410 u8 res13[16]; 500 u8 res9d[4];
411 u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */ 501 u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */
412 u8 res14[24]; 502 u8 res9e[4];
413 u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */ 503 u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */
414 u8 res15[64]; 504 u8 res9f[4];
415 u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */ 505 u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */
416 u8 res16[124]; 506 u8 res9g[4];
417 u32 rbase; /* 0x.404 - Receive Descriptor Base Address */ 507 u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */
418 u8 res17[248]; 508 u8 res9h[4];
419 u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */ 509 u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */
420 u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */ 510 u8 res9[64];
421 u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */ 511 u32 tbaseh; /* 0x.200 - TxBD base address high */
422 u32 hafdup; /* 0x.50c - Half Duplex Register */ 512 u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */
423 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ 513 u8 res10a[4];
514 u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */
515 u8 res10b[4];
516 u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */
517 u8 res10c[4];
518 u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */
519 u8 res10d[4];
520 u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */
521 u8 res10e[4];
522 u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */
523 u8 res10f[4];
524 u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */
525 u8 res10g[4];
526 u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */
527 u8 res10[192];
528 u32 rctrl; /* 0x.300 - Receive Control Register */
529 u32 rstat; /* 0x.304 - Receive Status Register */
530 u8 res12[8];
531 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
532 u32 rqueue; /* 0x.314 - Receive queue control register */
533 u8 res13[24];
534 u32 rbifx; /* 0x.330 - Receive bit field extract control register */
535 u32 rqfar; /* 0x.334 - Receive queue filing table address register */
536 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
537 u32 rqfpr; /* 0x.33c - Receive queue filing table property register */
538 u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
539 u8 res14[56];
540 u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */
541 u8 res15a[4];
542 u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */
543 u8 res15b[4];
544 u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */
545 u8 res15c[4];
546 u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */
547 u8 res15d[4];
548 u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */
549 u8 res15e[4];
550 u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */
551 u8 res15f[4];
552 u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */
553 u8 res15g[4];
554 u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */
555 u8 res15h[4];
556 u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */
557 u8 res16[64];
558 u32 rbaseh; /* 0x.400 - RxBD base address high */
559 u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */
560 u8 res17a[4];
561 u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */
562 u8 res17b[4];
563 u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */
564 u8 res17c[4];
565 u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */
566 u8 res17d[4];
567 u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */
568 u8 res17e[4];
569 u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */
570 u8 res17f[4];
571 u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */
572 u8 res17g[4];
573 u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */
574 u8 res17[192];
575 u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
576 u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
577 u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
578 u32 hafdup; /* 0x.50c - Half Duplex Register */
579 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
424 u8 res18[12]; 580 u8 res18[12];
425 u32 miimcfg; /* 0x.520 - MII Management Configuration Register */ 581 u32 miimcfg; /* 0x.520 - MII Management Configuration Register */
426 u32 miimcom; /* 0x.524 - MII Management Command Register */ 582 u32 miimcom; /* 0x.524 - MII Management Command Register */
427 u32 miimadd; /* 0x.528 - MII Management Address Register */ 583 u32 miimadd; /* 0x.528 - MII Management Address Register */
428 u32 miimcon; /* 0x.52c - MII Management Control Register */ 584 u32 miimcon; /* 0x.52c - MII Management Control Register */
429 u32 miimstat; /* 0x.530 - MII Management Status Register */ 585 u32 miimstat; /* 0x.530 - MII Management Status Register */
430 u32 miimind; /* 0x.534 - MII Management Indicator Register */ 586 u32 miimind; /* 0x.534 - MII Management Indicator Register */
431 u8 res19[4]; 587 u8 res19[4];
432 u32 ifstat; /* 0x.53c - Interface Status Register */ 588 u32 ifstat; /* 0x.53c - Interface Status Register */
433 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ 589 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
434 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ 590 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
435 u8 res20[312]; 591 u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */
436 struct rmon_mib rmon; 592 u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */
437 u8 res21[192]; 593 u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */
438 u32 iaddr0; /* 0x.800 - Indivdual address register 0 */ 594 u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */
439 u32 iaddr1; /* 0x.804 - Indivdual address register 1 */ 595 u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */
440 u32 iaddr2; /* 0x.808 - Indivdual address register 2 */ 596 u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */
441 u32 iaddr3; /* 0x.80c - Indivdual address register 3 */ 597 u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */
442 u32 iaddr4; /* 0x.810 - Indivdual address register 4 */ 598 u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */
443 u32 iaddr5; /* 0x.814 - Indivdual address register 5 */ 599 u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */
444 u32 iaddr6; /* 0x.818 - Indivdual address register 6 */ 600 u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */
445 u32 iaddr7; /* 0x.81c - Indivdual address register 7 */ 601 u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */
602 u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */
603 u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */
604 u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */
605 u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */
606 u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */
607 u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */
608 u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */
609 u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1 */
610 u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2 */
611 u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1 */
612 u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2 */
613 u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1 */
614 u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2 */
615 u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1 */
616 u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2 */
617 u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1 */
618 u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2 */
619 u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1 */
620 u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2 */

621 u8 res20[192];
622 struct rmon_mib rmon; /* 0x.680-0x.73c */
623 u32 rrej; /* 0x.740 - Receive filer rejected packet counter */
624 u8 res21[188];
625 u32 igaddr0; /* 0x.800 - Individual/Group address register 0 */
626 u32 igaddr1; /* 0x.804 - Individual/Group address register 1 */
627 u32 igaddr2; /* 0x.808 - Individual/Group address register 2 */
628 u32 igaddr3; /* 0x.80c - Individual/Group address register 3 */
629 u32 igaddr4; /* 0x.810 - Individual/Group address register 4 */
630 u32 igaddr5; /* 0x.814 - Individual/Group address register 5 */
631 u32 igaddr6; /* 0x.818 - Individual/Group address register 6 */
632 u32 igaddr7; /* 0x.81c - Individual/Group address register 7 */
446 u8 res22[96]; 633 u8 res22[96];
447 u32 gaddr0; /* 0x.880 - Global address register 0 */ 634 u32 gaddr0; /* 0x.880 - Group address register 0 */
448 u32 gaddr1; /* 0x.884 - Global address register 1 */ 635 u32 gaddr1; /* 0x.884 - Group address register 1 */
449 u32 gaddr2; /* 0x.888 - Global address register 2 */ 636 u32 gaddr2; /* 0x.888 - Group address register 2 */
450 u32 gaddr3; /* 0x.88c - Global address register 3 */ 637 u32 gaddr3; /* 0x.88c - Group address register 3 */
451 u32 gaddr4; /* 0x.890 - Global address register 4 */ 638 u32 gaddr4; /* 0x.890 - Group address register 4 */
452 u32 gaddr5; /* 0x.894 - Global address register 5 */ 639 u32 gaddr5; /* 0x.894 - Group address register 5 */
453 u32 gaddr6; /* 0x.898 - Global address register 6 */ 640 u32 gaddr6; /* 0x.898 - Group address register 6 */
454 u32 gaddr7; /* 0x.89c - Global address register 7 */ 641 u32 gaddr7; /* 0x.89c - Group address register 7 */
455 u8 res23[856]; 642 u8 res23a[352];
456 u32 attr; /* 0x.bf8 - Attributes Register */ 643 u32 fifocfg; /* 0x.a00 - FIFO interface config register */
457 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ 644 u8 res23b[252];
645 u8 res23c[248];
646 u32 attr; /* 0x.bf8 - Attributes Register */
647 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
458 u8 res24[1024]; 648 u8 res24[1024];
459 649
460}; 650};
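The register map now begins with a controller ID word at offset 0x000. A hypothetical use of it (not something this patch adds) would be to report the silicon revision at probe time:

/* Hypothetical helper, not in the patch: log the controller ID. */
static void gfar_print_id_sketch(struct gfar *regs)
{
	u32 id = gfar_read(&regs->tsec_id);

	printk(KERN_INFO "gianfar: TSEC ID register reads 0x%08x\n", id);
}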
@@ -496,6 +686,8 @@ struct gfar_private {
496 struct txbd8 *cur_tx; /* Next free ring entry */ 686 struct txbd8 *cur_tx; /* Next free ring entry */
497 struct txbd8 *dirty_tx; /* The Ring entry to be freed. */ 687 struct txbd8 *dirty_tx; /* The Ring entry to be freed. */
498 struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */ 688 struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
689 u32 *hash_regs[16];
690 int hash_width;
499 struct gfar *phyregs; 691 struct gfar *phyregs;
500 struct work_struct tq; 692 struct work_struct tq;
501 struct timer_list phy_info_timer; 693 struct timer_list phy_info_timer;
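hash_regs[] and hash_width let the multicast code address either the 8 group-address registers or the 16-entry extended table (RCTRL_GHTX). A hedged sketch of how they could be consumed; the bit ordering and hash width handling here are assumptions:

/* Hedged sketch: set the bit selected by an 8-bit hash value across
 * hash_width 32-bit registers whose addresses sit in hash_regs[]. */
static void gfar_set_hash_bit_sketch(struct gfar_private *priv, u8 hash)
{
	int whichreg = hash >> 5;		/* 32 bits per register */
	int whichbit = hash & 0x1f;
	u32 val = gfar_read(priv->hash_regs[whichreg]);

	val |= 1 << (31 - whichbit);		/* msb-first, by assumption */
	gfar_write(priv->hash_regs[whichreg], val);
}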
@@ -506,9 +698,12 @@ struct gfar_private {
506 unsigned int rx_stash_size; 698 unsigned int rx_stash_size;
507 unsigned int tx_ring_size; 699 unsigned int tx_ring_size;
508 unsigned int rx_ring_size; 700 unsigned int rx_ring_size;
509 wait_queue_head_t rxcleanupq;
510 unsigned int rxclean;
511 701
702 unsigned char vlan_enable:1,
703 rx_csum_enable:1,
704 extended_hash:1;
705 unsigned short padding;
706 struct vlan_group *vlgrp;
512 /* Info structure initialized by board setup code */ 707 /* Info structure initialized by board setup code */
513 unsigned int interruptTransmit; 708 unsigned int interruptTransmit;
514 unsigned int interruptReceive; 709 unsigned int interruptReceive;
@@ -519,6 +714,8 @@ struct gfar_private {
519 int oldspeed; 714 int oldspeed;
520 int oldduplex; 715 int oldduplex;
521 int oldlink; 716 int oldlink;
717
718 uint32_t msg_enable;
522}; 719};
523 720
524extern inline u32 gfar_read(volatile unsigned *addr) 721extern inline u32 gfar_read(volatile unsigned *addr)
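msg_enable is the standard net-driver message-level word; a hedged example of how it would typically be consumed through the generic netif_msg_*() helpers (this call site is illustrative, not from the patch):

/* Illustrative only: gate a link message on NETIF_MSG_LINK. */
static void gfar_report_link_sketch(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (netif_msg_link(priv))
		printk(KERN_INFO "%s: link is up\n", dev->name);
}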
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 28046e9e88ba..a451de629197 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -46,16 +46,18 @@
46 46
47extern int startup_gfar(struct net_device *dev); 47extern int startup_gfar(struct net_device *dev);
48extern void stop_gfar(struct net_device *dev); 48extern void stop_gfar(struct net_device *dev);
49extern void gfar_receive(int irq, void *dev_id, struct pt_regs *regs); 49extern void gfar_halt(struct net_device *dev);
50extern void gfar_start(struct net_device *dev);
51extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
50 52
51void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
52 u64 * buf); 54 u64 * buf);
53void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); 55static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
54int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 56static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
55int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 57static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
56void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 58static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
57int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 59static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
58void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); 60static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
59 61
60static char stat_gstrings[][ETH_GSTRING_LEN] = { 62static char stat_gstrings[][ETH_GSTRING_LEN] = {
61 "rx-dropped-by-kernel", 63 "rx-dropped-by-kernel",
@@ -118,57 +120,56 @@ static char stat_gstrings[][ETH_GSTRING_LEN] = {
118 "tx-fragmented-frames", 120 "tx-fragmented-frames",
119}; 121};
120 122
123/* Fill in a buffer with the strings which correspond to the
124 * stats */
125static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
126{
127 struct gfar_private *priv = netdev_priv(dev);
128
129 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
131 else
132 memcpy(buf, stat_gstrings,
133 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
134}
135
121/* Fill in an array of 64-bit statistics from various sources. 136/* Fill in an array of 64-bit statistics from various sources.
122 * This array will be appended to the end of the ethtool_stats 137 * This array will be appended to the end of the ethtool_stats
123 * structure, and returned to user space 138 * structure, and returned to user space
124 */ 139 */
125void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) 140static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
126{ 141{
127 int i; 142 int i;
128 struct gfar_private *priv = netdev_priv(dev); 143 struct gfar_private *priv = netdev_priv(dev);
129 u32 *rmon = (u32 *) & priv->regs->rmon;
130 u64 *extra = (u64 *) & priv->extra_stats; 144 u64 *extra = (u64 *) & priv->extra_stats;
131 struct gfar_stats *stats = (struct gfar_stats *) buf;
132 145
133 for (i = 0; i < GFAR_RMON_LEN; i++) { 146 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
134 stats->rmon[i] = (u64) (rmon[i]); 147 u32 *rmon = (u32 *) & priv->regs->rmon;
135 } 148 struct gfar_stats *stats = (struct gfar_stats *) buf;
136
137 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
138 stats->extra[i] = extra[i];
139 }
140}
141 149
142/* Returns the number of stats (and their corresponding strings) */ 150 for (i = 0; i < GFAR_RMON_LEN; i++)
143int gfar_stats_count(struct net_device *dev) 151 stats->rmon[i] = (u64) (rmon[i]);
144{
145 return GFAR_STATS_LEN;
146}
147 152
148void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf) 153 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
149{ 154 stats->extra[i] = extra[i];
150 memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); 155 } else
156 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
157 buf[i] = extra[i];
151} 158}
152 159
153void gfar_fill_stats_normon(struct net_device *dev, 160/* Returns the number of stats (and their corresponding strings) */
154 struct ethtool_stats *dummy, u64 * buf) 161static int gfar_stats_count(struct net_device *dev)
155{ 162{
156 int i;
157 struct gfar_private *priv = netdev_priv(dev); 163 struct gfar_private *priv = netdev_priv(dev);
158 u64 *extra = (u64 *) & priv->extra_stats;
159 164
160 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) { 165 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
161 buf[i] = extra[i]; 166 return GFAR_STATS_LEN;
162 } 167 else
168 return GFAR_EXTRA_STATS_LEN;
163} 169}
164 170
165
166int gfar_stats_count_normon(struct net_device *dev)
167{
168 return GFAR_EXTRA_STATS_LEN;
169}
170/* Fills in the drvinfo structure with some basic info */ 171/* Fills in the drvinfo structure with some basic info */
171void gfar_gdrvinfo(struct net_device *dev, struct 172static void gfar_gdrvinfo(struct net_device *dev, struct
172 ethtool_drvinfo *drvinfo) 173 ethtool_drvinfo *drvinfo)
173{ 174{
174 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); 175 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
@@ -182,7 +183,7 @@ void gfar_gdrvinfo(struct net_device *dev, struct
182} 183}
183 184
184/* Return the current settings in the ethtool_cmd structure */ 185/* Return the current settings in the ethtool_cmd structure */
185int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) 186static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
186{ 187{
187 struct gfar_private *priv = netdev_priv(dev); 188 struct gfar_private *priv = netdev_priv(dev);
188 uint gigabit_support = 189 uint gigabit_support =
@@ -216,13 +217,13 @@ int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
216} 217}
217 218
218/* Return the length of the register structure */ 219/* Return the length of the register structure */
219int gfar_reglen(struct net_device *dev) 220static int gfar_reglen(struct net_device *dev)
220{ 221{
221 return sizeof (struct gfar); 222 return sizeof (struct gfar);
222} 223}
223 224
224/* Return a dump of the GFAR register space */ 225/* Return a dump of the GFAR register space */
225void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) 226static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
226{ 227{
227 int i; 228 int i;
228 struct gfar_private *priv = netdev_priv(dev); 229 struct gfar_private *priv = netdev_priv(dev);
@@ -233,13 +234,6 @@ void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regb
233 buf[i] = theregs[i]; 234 buf[i] = theregs[i];
234} 235}
235 236
236/* Fill in a buffer with the strings which correspond to the
237 * stats */
238void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
239{
240 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
241}
242
243/* Convert microseconds to ethernet clock ticks, which changes 237/* Convert microseconds to ethernet clock ticks, which changes
244 * depending on what speed the controller is running at */ 238 * depending on what speed the controller is running at */
245static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) 239static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
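The body of gfar_usecs2ticks() falls outside this hunk. Purely as a hedged illustration of the arithmetic involved (the tick period parameter is a placeholder, not the hardware value, which depends on link speed):

/* Placeholder arithmetic only: convert between microseconds and
 * coalescing-timer ticks for a given tick period in nanoseconds. */
static unsigned int usecs2ticks_sketch(unsigned int usecs,
				       unsigned int ns_per_tick)
{
	return (usecs * 1000 + ns_per_tick - 1) / ns_per_tick;	/* round up */
}

static unsigned int ticks2usecs_sketch(unsigned int ticks,
				       unsigned int ns_per_tick)
{
	return (ticks * ns_per_tick) / 1000;
}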
@@ -291,9 +285,12 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
291 285
292/* Get the coalescing parameters, and put them in the cvals 286/* Get the coalescing parameters, and put them in the cvals
293 * structure. */ 287 * structure. */
294int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 288static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
295{ 289{
296 struct gfar_private *priv = netdev_priv(dev); 290 struct gfar_private *priv = netdev_priv(dev);
291
292 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
293 return -EOPNOTSUPP;
297 294
298 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); 295 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
299 cvals->rx_max_coalesced_frames = priv->rxcount; 296 cvals->rx_max_coalesced_frames = priv->rxcount;
@@ -337,10 +334,13 @@ int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
337 * Both cvals->*_usecs and cvals->*_frames have to be > 0 334 * Both cvals->*_usecs and cvals->*_frames have to be > 0
338 * in order for coalescing to be active 335 * in order for coalescing to be active
339 */ 336 */
340int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 337static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
341{ 338{
342 struct gfar_private *priv = netdev_priv(dev); 339 struct gfar_private *priv = netdev_priv(dev);
343 340
341 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
342 return -EOPNOTSUPP;
343
344 /* Set up rx coalescing */ 344 /* Set up rx coalescing */
345 if ((cvals->rx_coalesce_usecs == 0) || 345 if ((cvals->rx_coalesce_usecs == 0) ||
346 (cvals->rx_max_coalesced_frames == 0)) 346 (cvals->rx_max_coalesced_frames == 0))
@@ -379,7 +379,7 @@ int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
379/* Fills in rvals with the current ring parameters. Currently, 379/* Fills in rvals with the current ring parameters. Currently,
380 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and 380 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
381 * jumbo are ignored by the driver */ 381 * jumbo are ignored by the driver */
382void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 382static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
383{ 383{
384 struct gfar_private *priv = netdev_priv(dev); 384 struct gfar_private *priv = netdev_priv(dev);
385 385
@@ -401,9 +401,8 @@ void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
401 * necessary so that we don't mess things up while we're in 401 * necessary so that we don't mess things up while we're in
402 * motion. We wait for the ring to be clean before reallocating 402 * motion. We wait for the ring to be clean before reallocating
403 * the rings. */ 403 * the rings. */
404int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 404static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
405{ 405{
406 u32 tempval;
407 struct gfar_private *priv = netdev_priv(dev); 406 struct gfar_private *priv = netdev_priv(dev);
408 int err = 0; 407 int err = 0;
409 408
@@ -425,37 +424,54 @@ int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
425 return -EINVAL; 424 return -EINVAL;
426 } 425 }
427 426
428 /* Stop the controller so we don't rx any more frames */ 427 if (dev->flags & IFF_UP) {
429 /* But first, make sure we clear the bits */ 428 unsigned long flags;
430 tempval = gfar_read(&priv->regs->dmactrl);
431 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
432 gfar_write(&priv->regs->dmactrl, tempval);
433 429
434 tempval = gfar_read(&priv->regs->dmactrl); 430 /* Halt TX and RX, and process the frames which
435 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 431 * have already been received */
436 gfar_write(&priv->regs->dmactrl, tempval); 432 spin_lock_irqsave(&priv->lock, flags);
433 gfar_halt(dev);
434 gfar_clean_rx_ring(dev, priv->rx_ring_size);
435 spin_unlock_irqrestore(&priv->lock, flags);
437 436
438 while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) 437 /* Now we take down the rings to rebuild them */
439 cpu_relax(); 438 stop_gfar(dev);
439 }
440 440
441 /* Note that rx is not clean right now */ 441 /* Change the size */
442 priv->rxclean = 0; 442 priv->rx_ring_size = rvals->rx_pending;
443 priv->tx_ring_size = rvals->tx_pending;
443 444
444 if (dev->flags & IFF_UP) { 445 /* Rebuild the rings with the new size */
445 /* Tell the driver to process the rest of the frames */ 446 if (dev->flags & IFF_UP)
446 gfar_receive(0, (void *) dev, NULL); 447 err = startup_gfar(dev);
447 448
448 /* Now wait for it to be done */ 449 return err;
449 wait_event_interruptible(priv->rxcleanupq, priv->rxclean); 450}
450 451
451 /* Ok, all packets have been handled. Now we bring it down, 452static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
452 * change the ring size, and bring it up */ 453{
454 struct gfar_private *priv = netdev_priv(dev);
455 int err = 0;
453 456
457 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
458 return -EOPNOTSUPP;
459
460 if (dev->flags & IFF_UP) {
461 unsigned long flags;
462
463 /* Halt TX and RX, and process the frames which
464 * have already been received */
465 spin_lock_irqsave(&priv->lock, flags);
466 gfar_halt(dev);
467 gfar_clean_rx_ring(dev, priv->rx_ring_size);
468 spin_unlock_irqrestore(&priv->lock, flags);
469
470 /* Now we take down the rings to rebuild them */
454 stop_gfar(dev); 471 stop_gfar(dev);
455 } 472 }
456 473
457 priv->rx_ring_size = rvals->rx_pending; 474 priv->rx_csum_enable = data;
458 priv->tx_ring_size = rvals->tx_pending;
459 475
460 if (dev->flags & IFF_UP) 476 if (dev->flags & IFF_UP)
461 err = startup_gfar(dev); 477 err = startup_gfar(dev);
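gfar_sringparam() and gfar_set_rx_csum() now share the same quiesce-and-rebuild sequence. The helper below is only a hedged summary of that pattern (a refactoring sketch, not something the patch introduces); it assumes the driver's existing priv->lock and the externs declared at the top of this file:

/* Sketch of the shared pattern: quiesce DMA, drain received frames,
 * tear the rings down, apply a change, then restart if the interface
 * was up. */
static int gfar_reconfigure_sketch(struct net_device *dev,
				   void (*apply)(struct gfar_private *))
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (dev->flags & IFF_UP) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		gfar_halt(dev);				/* stop TX/RX DMA */
		gfar_clean_rx_ring(dev, priv->rx_ring_size);
		spin_unlock_irqrestore(&priv->lock, flags);

		stop_gfar(dev);				/* free the rings */
	}

	apply(priv);		/* e.g. change ring sizes or rx_csum_enable */

	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);		/* rebuild and restart */

	return err;
}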
@@ -463,6 +479,61 @@ int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
463 return err; 479 return err;
464} 480}
465 481
482static uint32_t gfar_get_rx_csum(struct net_device *dev)
483{
484 struct gfar_private *priv = netdev_priv(dev);
485
486 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
487 return 0;
488
489 return priv->rx_csum_enable;
490}
491
492static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
493{
494 unsigned long flags;
495 struct gfar_private *priv = netdev_priv(dev);
496
497 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
498 return -EOPNOTSUPP;
499
500 spin_lock_irqsave(&priv->lock, flags);
501 gfar_halt(dev);
502
503 if (data)
504 dev->features |= NETIF_F_IP_CSUM;
505 else
506 dev->features &= ~NETIF_F_IP_CSUM;
507
508 gfar_start(dev);
509 spin_unlock_irqrestore(&priv->lock, flags);
510
511 return 0;
512}
513
514static uint32_t gfar_get_tx_csum(struct net_device *dev)
515{
516 struct gfar_private *priv = netdev_priv(dev);
517
518 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
519 return 0;
520
521 return (dev->features & NETIF_F_IP_CSUM) != 0;
522}
523
524static uint32_t gfar_get_msglevel(struct net_device *dev)
525{
526 struct gfar_private *priv = netdev_priv(dev);
527 return priv->msg_enable;
528}
529
530static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
531{
532 struct gfar_private *priv = netdev_priv(dev);
533 priv->msg_enable = data;
534}
535
536
466struct ethtool_ops gfar_ethtool_ops = { 537struct ethtool_ops gfar_ethtool_ops = {
467 .get_settings = gfar_gsettings, 538 .get_settings = gfar_gsettings,
468 .get_drvinfo = gfar_gdrvinfo, 539 .get_drvinfo = gfar_gdrvinfo,
@@ -476,52 +547,10 @@ struct ethtool_ops gfar_ethtool_ops = {
476 .get_strings = gfar_gstrings, 547 .get_strings = gfar_gstrings,
477 .get_stats_count = gfar_stats_count, 548 .get_stats_count = gfar_stats_count,
478 .get_ethtool_stats = gfar_fill_stats, 549 .get_ethtool_stats = gfar_fill_stats,
479}; 550 .get_rx_csum = gfar_get_rx_csum,
480 551 .get_tx_csum = gfar_get_tx_csum,
481struct ethtool_ops gfar_normon_nocoalesce_ethtool_ops = { 552 .set_rx_csum = gfar_set_rx_csum,
482 .get_settings = gfar_gsettings, 553 .set_tx_csum = gfar_set_tx_csum,
483 .get_drvinfo = gfar_gdrvinfo, 554 .get_msglevel = gfar_get_msglevel,
484 .get_regs_len = gfar_reglen, 555 .set_msglevel = gfar_set_msglevel,
485 .get_regs = gfar_get_regs,
486 .get_link = ethtool_op_get_link,
487 .get_ringparam = gfar_gringparam,
488 .set_ringparam = gfar_sringparam,
489 .get_strings = gfar_gstrings_normon,
490 .get_stats_count = gfar_stats_count_normon,
491 .get_ethtool_stats = gfar_fill_stats_normon,
492};
493
494struct ethtool_ops gfar_nocoalesce_ethtool_ops = {
495 .get_settings = gfar_gsettings,
496 .get_drvinfo = gfar_gdrvinfo,
497 .get_regs_len = gfar_reglen,
498 .get_regs = gfar_get_regs,
499 .get_link = ethtool_op_get_link,
500 .get_ringparam = gfar_gringparam,
501 .set_ringparam = gfar_sringparam,
502 .get_strings = gfar_gstrings,
503 .get_stats_count = gfar_stats_count,
504 .get_ethtool_stats = gfar_fill_stats,
505};
506
507struct ethtool_ops gfar_normon_ethtool_ops = {
508 .get_settings = gfar_gsettings,
509 .get_drvinfo = gfar_gdrvinfo,
510 .get_regs_len = gfar_reglen,
511 .get_regs = gfar_get_regs,
512 .get_link = ethtool_op_get_link,
513 .get_coalesce = gfar_gcoalesce,
514 .set_coalesce = gfar_scoalesce,
515 .get_ringparam = gfar_gringparam,
516 .set_ringparam = gfar_sringparam,
517 .get_strings = gfar_gstrings_normon,
518 .get_stats_count = gfar_stats_count_normon,
519 .get_ethtool_stats = gfar_fill_stats_normon,
520};
521
522struct ethtool_ops *gfar_op_array[] = {
523 &gfar_ethtool_ops,
524 &gfar_normon_ethtool_ops,
525 &gfar_nocoalesce_ethtool_ops,
526 &gfar_normon_nocoalesce_ethtool_ops
527}; 556};
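With the per-variant tables and gfar_op_array removed, the probe path only needs the single gfar_ethtool_ops table; feature availability is handled inside the handlers via priv->einfo->device_flags. Roughly (illustrative, the actual hook-up lives in gianfar.c):

/* Illustrative: attach the one consolidated ops table; handlers
 * return -EOPNOTSUPP (or 0) when a feature flag is absent. */
static void gfar_attach_ethtool_sketch(struct net_device *dev)
{
	dev->ethtool_ops = &gfar_ethtool_ops;
}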