Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--	drivers/net/gianfar.c	652
1 file changed, 469 insertions, 183 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b43b2b11aacd..6518334b9280 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1,4 +1,4 @@
 /*
  * drivers/net/gianfar.c
  *
  * Gianfar Ethernet Driver
@@ -22,10 +22,9 @@
  * B-V +1.62
  *
  * Theory of operation
- * This driver is designed for the Triple-speed Ethernet
- * controllers on the Freescale 8540/8560 integrated processors,
- * as well as the Fast Ethernet Controller on the 8540.
- *
+ * This driver is designed for the non-CPM ethernet controllers
+ * on the 85xx and 83xx family of integrated processors
+ *
  * The driver is initialized through platform_device. Structures which
  * define the configuration needed by the board are defined in a
  * board structure in arch/ppc/platforms (though I do not
@@ -39,12 +38,12 @@
  *
  * The Gianfar Ethernet Controller uses a ring of buffer
  * descriptors. The beginning is indicated by a register
  * pointing to the physical address of the start of the ring.
  * The end is determined by a "wrap" bit being set in the
  * last descriptor of the ring.
  *
  * When a packet is received, the RXF bit in the
  * IEVENT register is set, triggering an interrupt when the
  * corresponding bit in the IMASK register is also set (if
  * interrupt coalescing is active, then the interrupt may not
  * happen immediately, but will wait until either a set number
@@ -52,7 +51,7 @@
  * interrupt handler will signal there is work to be done, and
  * exit. Without NAPI, the packet(s) will be handled
  * immediately. Both methods will start at the last known empty
  * descriptor, and process every subsequent descriptor until there
  * are none left with data (NAPI will stop after a set number of
  * packets to give time to other tasks, but will eventually
  * process all the packets). The data arrives inside a
@@ -83,9 +82,13 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/device.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -123,7 +126,7 @@ static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
-irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 static void gfar_phy_change(void *data);
@@ -139,9 +142,12 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 #ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct net_device *dev, int *budget);
 #endif
-static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
 static void gfar_phy_startup_timer(unsigned long data);
+static void gfar_vlan_rx_register(struct net_device *netdev,
+		struct vlan_group *grp);
+static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 
 extern struct ethtool_ops gfar_ethtool_ops;
 
@@ -149,6 +155,13 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
+int gfar_uses_fcb(struct gfar_private *priv)
+{
+	if (priv->vlan_enable || priv->rx_csum_enable)
+		return 1;
+	else
+		return 0;
+}
 static int gfar_probe(struct device *device)
 {
 	u32 tempval;
@@ -159,7 +172,6 @@ static int gfar_probe(struct device *device)
 	struct resource *r;
 	int idx;
 	int err = 0;
-	int dev_ethtool_ops = 0;
 
 	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
 
@@ -265,15 +277,69 @@ static int gfar_probe(struct device *device)
 	dev->mtu = 1500;
 	dev->set_multicast_list = gfar_set_multi;
 
-	/* Index into the array of possible ethtool
-	 * ops to catch all 4 possibilities */
-	if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) == 0)
-		dev_ethtool_ops += 1;
+	dev->ethtool_ops = &gfar_ethtool_ops;
+
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+		priv->rx_csum_enable = 1;
+		dev->features |= NETIF_F_IP_CSUM;
+	} else
+		priv->rx_csum_enable = 0;
+
+	priv->vlgrp = NULL;
 
-	if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE) == 0)
-		dev_ethtool_ops += 2;
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+		dev->vlan_rx_register = gfar_vlan_rx_register;
+		dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;
 
-	dev->ethtool_ops = gfar_op_array[dev_ethtool_ops];
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+		priv->vlan_enable = 1;
+	}
+
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+		priv->extended_hash = 1;
+		priv->hash_width = 9;
+
+		priv->hash_regs[0] = &priv->regs->igaddr0;
+		priv->hash_regs[1] = &priv->regs->igaddr1;
+		priv->hash_regs[2] = &priv->regs->igaddr2;
+		priv->hash_regs[3] = &priv->regs->igaddr3;
+		priv->hash_regs[4] = &priv->regs->igaddr4;
+		priv->hash_regs[5] = &priv->regs->igaddr5;
+		priv->hash_regs[6] = &priv->regs->igaddr6;
+		priv->hash_regs[7] = &priv->regs->igaddr7;
+		priv->hash_regs[8] = &priv->regs->gaddr0;
+		priv->hash_regs[9] = &priv->regs->gaddr1;
+		priv->hash_regs[10] = &priv->regs->gaddr2;
+		priv->hash_regs[11] = &priv->regs->gaddr3;
+		priv->hash_regs[12] = &priv->regs->gaddr4;
+		priv->hash_regs[13] = &priv->regs->gaddr5;
+		priv->hash_regs[14] = &priv->regs->gaddr6;
+		priv->hash_regs[15] = &priv->regs->gaddr7;
+
+	} else {
+		priv->extended_hash = 0;
+		priv->hash_width = 8;
+
+		priv->hash_regs[0] = &priv->regs->gaddr0;
+		priv->hash_regs[1] = &priv->regs->gaddr1;
+		priv->hash_regs[2] = &priv->regs->gaddr2;
+		priv->hash_regs[3] = &priv->regs->gaddr3;
+		priv->hash_regs[4] = &priv->regs->gaddr4;
+		priv->hash_regs[5] = &priv->regs->gaddr5;
+		priv->hash_regs[6] = &priv->regs->gaddr6;
+		priv->hash_regs[7] = &priv->regs->gaddr7;
+	}
+
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
+		priv->padding = DEFAULT_PADDING;
+	else
+		priv->padding = 0;
+
+	dev->hard_header_len += priv->padding;
+
+	if (dev->features & NETIF_F_IP_CSUM)
+		dev->hard_header_len += GMAC_FCB_LEN;
 
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
 #ifdef CONFIG_GFAR_BUFSTASH
@@ -289,6 +355,9 @@ static int gfar_probe(struct device *device)
 	priv->rxcount = DEFAULT_RXCOUNT;
 	priv->rxtime = DEFAULT_RXTIME;
 
+	/* Enable most messages by default */
+	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+
 	err = register_netdev(dev);
 
 	if (err) {
@@ -360,8 +429,9 @@ static int init_phy(struct net_device *dev)
 			GFP_KERNEL);
 
 	if(NULL == mii_info) {
-		printk(KERN_ERR "%s: Could not allocate mii_info\n",
-				dev->name);
+		if (netif_msg_ifup(priv))
+			printk(KERN_ERR "%s: Could not allocate mii_info\n",
+					dev->name);
 		return -ENOMEM;
 	}
 
@@ -410,7 +480,8 @@ static int init_phy(struct net_device *dev)
 	curphy = get_phy_info(priv->mii_info);
 
 	if (curphy == NULL) {
-		printk(KERN_ERR "%s: No PHY found\n", dev->name);
+		if (netif_msg_ifup(priv))
+			printk(KERN_ERR "%s: No PHY found\n", dev->name);
 		err = -1;
 		goto no_phy;
 	}
@@ -421,7 +492,7 @@ static int init_phy(struct net_device *dev)
 	if(curphy->init) {
 		err = curphy->init(priv->mii_info);
 
 		if (err)
 			goto phy_init_fail;
 	}
 
@@ -446,14 +517,14 @@ static void init_registers(struct net_device *dev)
 	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
 
 	/* Init hash registers to zero */
-	gfar_write(&priv->regs->iaddr0, 0);
-	gfar_write(&priv->regs->iaddr1, 0);
-	gfar_write(&priv->regs->iaddr2, 0);
-	gfar_write(&priv->regs->iaddr3, 0);
-	gfar_write(&priv->regs->iaddr4, 0);
-	gfar_write(&priv->regs->iaddr5, 0);
-	gfar_write(&priv->regs->iaddr6, 0);
-	gfar_write(&priv->regs->iaddr7, 0);
+	gfar_write(&priv->regs->igaddr0, 0);
+	gfar_write(&priv->regs->igaddr1, 0);
+	gfar_write(&priv->regs->igaddr2, 0);
+	gfar_write(&priv->regs->igaddr3, 0);
+	gfar_write(&priv->regs->igaddr4, 0);
+	gfar_write(&priv->regs->igaddr5, 0);
+	gfar_write(&priv->regs->igaddr6, 0);
+	gfar_write(&priv->regs->igaddr7, 0);
 
 	gfar_write(&priv->regs->gaddr0, 0);
 	gfar_write(&priv->regs->gaddr1, 0);
@@ -464,9 +535,6 @@ static void init_registers(struct net_device *dev)
 	gfar_write(&priv->regs->gaddr6, 0);
 	gfar_write(&priv->regs->gaddr7, 0);
 
-	/* Zero out rctrl */
-	gfar_write(&priv->regs->rctrl, 0x00000000);
-
 	/* Zero out the rmon mib registers if it has them */
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
 		memset((void *) &(priv->regs->rmon), 0,
@@ -497,20 +565,14 @@ static void init_registers(struct net_device *dev)
 	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
 }
 
-void stop_gfar(struct net_device *dev)
+
+/* Halt the receive and transmit queues */
+void gfar_halt(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar *regs = priv->regs;
-	unsigned long flags;
 	u32 tempval;
 
-	/* Lock it down */
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Tell the kernel the link is down */
-	priv->mii_info->link = 0;
-	adjust_link(dev);
-
 	/* Mask all interrupts */
 	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
@@ -533,13 +595,29 @@ void stop_gfar(struct net_device *dev)
 	tempval = gfar_read(&regs->maccfg1);
 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
 	gfar_write(&regs->maccfg1, tempval);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	unsigned long flags;
+
+	/* Lock it down */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Tell the kernel the link is down */
+	priv->mii_info->link = 0;
+	adjust_link(dev);
+
+	gfar_halt(dev);
 
 	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
 		/* Clear any pending interrupts */
 		mii_clear_phy_interrupt(priv->mii_info);
 
 		/* Disable PHY Interrupts */
 		mii_configure_phy_interrupt(priv->mii_info,
 				MII_INTERRUPT_DISABLED);
 	}
 
@@ -566,7 +644,7 @@ void stop_gfar(struct net_device *dev)
 			sizeof(struct txbd8)*priv->tx_ring_size
 			+ sizeof(struct rxbd8)*priv->rx_ring_size,
 			priv->tx_bd_base,
-			gfar_read(&regs->tbase));
+			gfar_read(&regs->tbase0));
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -620,6 +698,34 @@ void free_skb_resources(struct gfar_private *priv)
 	}
 }
 
+void gfar_start(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	u32 tempval;
+
+	/* Enable Rx and Tx in MACCFG1 */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* Clear THLT, so that the DMA starts polling now */
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* Unmask the interrupts we look for */
+	gfar_write(&regs->imask, IMASK_DEFAULT);
+}
+
 /* Bring the controller up and running */
 int startup_gfar(struct net_device *dev)
 {
@@ -630,33 +736,34 @@ int startup_gfar(struct net_device *dev)
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar *regs = priv->regs;
-	u32 tempval;
 	int err = 0;
+	u32 rctrl = 0;
 
 	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
 	/* Allocate memory for the buffer descriptors */
 	vaddr = (unsigned long) dma_alloc_coherent(NULL,
 			sizeof (struct txbd8) * priv->tx_ring_size +
 			sizeof (struct rxbd8) * priv->rx_ring_size,
 			&addr, GFP_KERNEL);
 
 	if (vaddr == 0) {
-		printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
-				dev->name);
+		if (netif_msg_ifup(priv))
+			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
+					dev->name);
 		return -ENOMEM;
 	}
 
 	priv->tx_bd_base = (struct txbd8 *) vaddr;
 
 	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase, addr);
+	gfar_write(&regs->tbase0, addr);
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
 	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
 	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
 	priv->rx_bd_base = (struct rxbd8 *) vaddr;
-	gfar_write(&regs->rbase, addr);
+	gfar_write(&regs->rbase0, addr);
 
 	/* Setup the skbuff rings */
 	priv->tx_skbuff =
@@ -664,8 +771,9 @@ int startup_gfar(struct net_device *dev)
 			priv->tx_ring_size, GFP_KERNEL);
 
 	if (priv->tx_skbuff == NULL) {
-		printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
-				dev->name);
+		if (netif_msg_ifup(priv))
+			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
+					dev->name);
 		err = -ENOMEM;
 		goto tx_skb_fail;
 	}
@@ -678,8 +786,9 @@ int startup_gfar(struct net_device *dev)
 			priv->rx_ring_size, GFP_KERNEL);
 
 	if (priv->rx_skbuff == NULL) {
-		printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
-				dev->name);
+		if (netif_msg_ifup(priv))
+			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
+					dev->name);
 		err = -ENOMEM;
 		goto rx_skb_fail;
 	}
@@ -726,12 +835,13 @@ int startup_gfar(struct net_device *dev)
 	/* If the device has multiple interrupts, register for
 	 * them.  Otherwise, only register for the one */
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
 		/* Install our interrupt handlers for Error,
 		 * Transmit, and Receive */
 		if (request_irq(priv->interruptError, gfar_error,
 				0, "enet_error", dev) < 0) {
-			printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptError);
+			if (netif_msg_intr(priv))
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, priv->interruptError);
 
 			err = -1;
 			goto err_irq_fail;
@@ -739,8 +849,9 @@ int startup_gfar(struct net_device *dev)
 
 		if (request_irq(priv->interruptTransmit, gfar_transmit,
 				0, "enet_tx", dev) < 0) {
-			printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptTransmit);
+			if (netif_msg_intr(priv))
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, priv->interruptTransmit);
 
 			err = -1;
 
@@ -749,8 +860,9 @@ int startup_gfar(struct net_device *dev)
 
 		if (request_irq(priv->interruptReceive, gfar_receive,
 				0, "enet_rx", dev) < 0) {
-			printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
-					dev->name, priv->interruptReceive);
+			if (netif_msg_intr(priv))
+				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
+					dev->name, priv->interruptReceive);
 
 			err = -1;
 			goto rx_irq_fail;
@@ -758,8 +870,9 @@ int startup_gfar(struct net_device *dev)
 	} else {
 		if (request_irq(priv->interruptTransmit, gfar_interrupt,
 				0, "gfar_interrupt", dev) < 0) {
-			printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptError);
+			if (netif_msg_intr(priv))
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, priv->interruptError);
 
 			err = -1;
 			goto err_irq_fail;
@@ -787,28 +900,22 @@ int startup_gfar(struct net_device *dev)
 	else
 		gfar_write(&regs->rxic, 0);
 
-	init_waitqueue_head(&priv->rxcleanupq);
+	if (priv->rx_csum_enable)
+		rctrl |= RCTRL_CHECKSUMMING;
 
-	/* Enable Rx and Tx in MACCFG1 */
-	tempval = gfar_read(&regs->maccfg1);
-	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
-	gfar_write(&regs->maccfg1, tempval);
+	if (priv->extended_hash)
+		rctrl |= RCTRL_EXTHASH;
 
-	/* Initialize DMACTRL to have WWR and WOP */
-	tempval = gfar_read(&priv->regs->dmactrl);
-	tempval |= DMACTRL_INIT_SETTINGS;
-	gfar_write(&priv->regs->dmactrl, tempval);
+	if (priv->vlan_enable)
+		rctrl |= RCTRL_VLAN;
 
-	/* Clear THLT, so that the DMA starts polling now */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+	/* Init rctrl based on our settings */
+	gfar_write(&priv->regs->rctrl, rctrl);
 
-	/* Make sure we aren't stopped */
-	tempval = gfar_read(&priv->regs->dmactrl);
-	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&priv->regs->dmactrl, tempval);
+	if (dev->features & NETIF_F_IP_CSUM)
+		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
 
-	/* Unmask the interrupts we look for */
-	gfar_write(&regs->imask, IMASK_DEFAULT);
+	gfar_start(dev);
 
 	return 0;
 
@@ -824,7 +931,7 @@ tx_skb_fail:
 			sizeof(struct txbd8)*priv->tx_ring_size
 			+ sizeof(struct rxbd8)*priv->rx_ring_size,
 			priv->tx_bd_base,
-			gfar_read(&regs->tbase));
+			gfar_read(&regs->tbase0));
 
 	if (priv->mii_info->phyinfo->close)
 		priv->mii_info->phyinfo->close(priv->mii_info);
@@ -857,11 +964,62 @@ static int gfar_enet_open(struct net_device *dev)
 	return err;
 }
 
+static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
+{
+	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
+
+	memset(fcb, 0, GMAC_FCB_LEN);
+
+	/* Flag the bd so the controller looks for the FCB */
+	bdp->status |= TXBD_TOE;
+
+	return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
+{
+	int len;
+
+	/* If we're here, it's a IP packet with a TCP or UDP
+	 * payload.  We set it to checksum, using a pseudo-header
+	 * we provide
+	 */
+	fcb->ip = 1;
+	fcb->tup = 1;
+	fcb->ctu = 1;
+	fcb->nph = 1;
+
+	/* Notify the controller what the protocol is */
+	if (skb->nh.iph->protocol == IPPROTO_UDP)
+		fcb->udp = 1;
+
+	/* l3os is the distance between the start of the
+	 * frame (skb->data) and the start of the IP hdr.
+	 * l4os is the distance between the start of the
+	 * l3 hdr and the l4 hdr */
+	fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
+	fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
+
+	len = skb->nh.iph->tot_len - fcb->l4os;
+
+	/* Provide the pseudoheader csum */
+	fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+			skb->nh.iph->daddr, len,
+			skb->nh.iph->protocol, 0);
+}
+
+void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+	fcb->vln = 1;
+	fcb->vlctl = vlan_tx_tag_get(skb);
+}
+
 /* This is called by the kernel when a frame is ready for transmission. */
 /* It is pointed to by the dev->hard_start_xmit function pointer */
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp;
 
 	/* Update transmit stats */
@@ -876,9 +1034,24 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Clear all but the WRAP status flags */
 	txbdp->status &= TXBD_WRAP;
 
+	/* Set up checksumming */
+	if ((dev->features & NETIF_F_IP_CSUM)
+			&& (CHECKSUM_HW == skb->ip_summed)) {
+		fcb = gfar_add_fcb(skb, txbdp);
+		gfar_tx_checksum(skb, fcb);
+	}
+
+	if (priv->vlan_enable &&
+			unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
+		if (NULL == fcb)
+			fcb = gfar_add_fcb(skb, txbdp);
+
+		gfar_tx_vlan(skb, fcb);
+	}
+
 	/* Set buffer length and pointer */
 	txbdp->length = skb->len;
 	txbdp->bufPtr = dma_map_single(NULL, skb->data,
 			skb->len, DMA_TO_DEVICE);
 
 	/* Save the skb pointer so we can free it later */
@@ -972,15 +1145,78 @@ int gfar_set_mac_address(struct net_device *dev)
 }
 
 
+/* Enables and disables VLAN insertion/extraction */
+static void gfar_vlan_rx_register(struct net_device *dev,
+		struct vlan_group *grp)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	unsigned long flags;
+	u32 tempval;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	priv->vlgrp = grp;
+
+	if (grp) {
+		/* Enable VLAN tag insertion */
+		tempval = gfar_read(&priv->regs->tctrl);
+		tempval |= TCTRL_VLINS;
+
+		gfar_write(&priv->regs->tctrl, tempval);
+
+		/* Enable VLAN tag extraction */
+		tempval = gfar_read(&priv->regs->rctrl);
+		tempval |= RCTRL_VLEX;
+		gfar_write(&priv->regs->rctrl, tempval);
+	} else {
+		/* Disable VLAN tag insertion */
+		tempval = gfar_read(&priv->regs->tctrl);
+		tempval &= ~TCTRL_VLINS;
+		gfar_write(&priv->regs->tctrl, tempval);
+
+		/* Disable VLAN tag extraction */
+		tempval = gfar_read(&priv->regs->rctrl);
+		tempval &= ~RCTRL_VLEX;
+		gfar_write(&priv->regs->rctrl, tempval);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+
+static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (priv->vlgrp)
+		priv->vlgrp->vlan_devices[vid] = NULL;
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int tempsize, tempval;
 	struct gfar_private *priv = netdev_priv(dev);
 	int oldsize = priv->rx_buffer_size;
-	int frame_size = new_mtu + 18;
+	int frame_size = new_mtu + ETH_HLEN;
+
+	if (priv->vlan_enable)
+		frame_size += VLAN_ETH_HLEN;
+
+	if (gfar_uses_fcb(priv))
+		frame_size += GMAC_FCB_LEN;
+
+	frame_size += priv->padding;
 
 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
-		printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
+		if (netif_msg_drv(priv))
+			printk(KERN_ERR "%s: Invalid MTU setting\n",
+					dev->name);
 		return -EINVAL;
 	}
 
@@ -1120,7 +1356,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	skb->dev = dev;
 
 	bdp->bufPtr = dma_map_single(NULL, skb->data,
 			priv->rx_buffer_size + RXBUF_ALIGNMENT,
 			DMA_FROM_DEVICE);
 
 	bdp->length = 0;
@@ -1190,11 +1426,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 
 		__netif_rx_schedule(dev);
 	} else {
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
-			dev->name, gfar_read(&priv->regs->ievent),
-			gfar_read(&priv->regs->imask));
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
+				dev->name, gfar_read(&priv->regs->ievent),
+				gfar_read(&priv->regs->imask));
 	}
 #else
 
@@ -1209,15 +1444,43 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 	else
 		gfar_write(&priv->regs->rxic, 0);
 
-	/* Just in case we need to wake the ring param changer */
-	priv->rxclean = 1;
-
 	spin_unlock(&priv->lock);
 #endif
 
 	return IRQ_HANDLED;
 }
 
+static inline int gfar_rx_vlan(struct sk_buff *skb,
+		struct vlan_group *vlgrp, unsigned short vlctl)
+{
+#ifdef CONFIG_GFAR_NAPI
+	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
+#else
+	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
+#endif
+}
+
+static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
+{
+	/* If valid headers were found, and valid sums
+	 * were verified, then we tell the kernel that no
+	 * checksumming is necessary.  Otherwise, it is */
+	if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb->ip_summed = CHECKSUM_NONE;
+}
+
+
+static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
+{
+	struct rxfcb *fcb = (struct rxfcb *)skb->data;
+
+	/* Remove the FCB from the skb */
+	skb_pull(skb, GMAC_FCB_LEN);
+
+	return fcb;
+}
 
 /* gfar_process_frame() -- handle one incoming packet if skb
  * isn't NULL.  */
@@ -1225,35 +1488,51 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			      int length)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct rxfcb *fcb = NULL;
 
 	if (skb == NULL) {
-#ifdef BRIEF_GFAR_ERRORS
-		printk(KERN_WARNING "%s: Missing skb!!.\n",
-				dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
 		priv->stats.rx_dropped++;
 		priv->extra_stats.rx_skbmissing++;
 	} else {
+		int ret;
+
 		/* Prep the skb for the packet */
 		skb_put(skb, length);
 
+		/* Grab the FCB if there is one */
+		if (gfar_uses_fcb(priv))
+			fcb = gfar_get_fcb(skb);
+
+		/* Remove the padded bytes, if there are any */
+		if (priv->padding)
+			skb_pull(skb, priv->padding);
+
+		if (priv->rx_csum_enable)
+			gfar_rx_checksum(skb, fcb);
+
 		/* Tell the skb what kind of packet this is */
 		skb->protocol = eth_type_trans(skb, dev);
 
 		/* Send the packet up the stack */
-		if (RECEIVE(skb) == NET_RX_DROP) {
+		if (unlikely(priv->vlgrp && fcb->vln))
+			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
+		else
+			ret = RECEIVE(skb);
+
+		if (NET_RX_DROP == ret)
 			priv->extra_stats.kernel_dropped++;
-		}
 	}
 
 	return 0;
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
-static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 {
 	struct rxbd8 *bdp;
 	struct sk_buff *skb;
@@ -1355,9 +1634,6 @@ static int gfar_poll(struct net_device *dev, int *budget)
 				mk_ic_value(priv->rxcount, priv->rxtime));
 		else
 			gfar_write(&priv->regs->rxic, 0);
-
-		/* Signal to the ring size changer that it's safe to go */
-		priv->rxclean = 1;
 	}
 
 	return (rx_work_limit < 0) ? 1 : 0;
@@ -1393,10 +1669,8 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		if (events & IEVENT_CRL)
 			priv->stats.tx_aborted_errors++;
 		if (events & IEVENT_XFUN) {
-#ifdef VERBOSE_GFAR_ERRORS
-			printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
-					dev->name);
-#endif
+			if (netif_msg_tx_err(priv))
+				printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
 			priv->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;
 
@@ -1415,36 +1689,30 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
 #endif
 
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
-				gfar_read(&priv->regs->rstat));
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
+					dev->name,
+					gfar_read(&priv->regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		priv->stats.rx_errors++;
 		priv->extra_stats.rx_babr++;
 
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
 	}
 	if (events & IEVENT_EBERR) {
 		priv->extra_stats.eberr++;
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
-#endif
-	}
-	if (events & IEVENT_RXC) {
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: control frame\n", dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
 	}
+	if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
+		printk(KERN_DEBUG "%s: control frame\n", dev->name);
 
 	if (events & IEVENT_BABT) {
 		priv->extra_stats.tx_babt++;
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: babt error\n", dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: babt error\n", dev->name);
 	}
 
 	return IRQ_HANDLED;
@@ -1510,7 +1778,7 @@ static void gfar_phy_timer(unsigned long data)
  * If, after GFAR_AN_TIMEOUT seconds, it has not
  * finished, we switch to forced.
  * Either way, once the process has completed, we either
  * request the interrupt, or switch the timer over to
  * using gfar_phy_timer to check status */
 static void gfar_phy_startup_timer(unsigned long data)
 {
@@ -1535,8 +1803,9 @@ static void gfar_phy_startup_timer(unsigned long data)
 
 			/* Forcing failed!  Give up */
 			if(result) {
-				printk(KERN_ERR "%s: Forcing failed!\n",
-						mii_info->dev->name);
+				if (netif_msg_link(priv))
+					printk(KERN_ERR "%s: Forcing failed!\n",
+							mii_info->dev->name);
 				return;
 			}
 		}
@@ -1546,16 +1815,17 @@ static void gfar_phy_startup_timer(unsigned long data)
 
 	/* Grab the PHY interrupt, if necessary/possible */
 	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
 		if (request_irq(priv->einfo->interruptPHY,
 				phy_interrupt,
 				SA_SHIRQ,
 				"phy_interrupt",
 				mii_info->dev) < 0) {
-			printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
-					mii_info->dev->name,
-					priv->einfo->interruptPHY);
+			if (netif_msg_intr(priv))
+				printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
+						mii_info->dev->name,
+						priv->einfo->interruptPHY);
 		} else {
 			mii_configure_phy_interrupt(priv->mii_info,
 					MII_INTERRUPT_ENABLED);
 			return;
 		}
@@ -1592,15 +1862,17 @@ static void adjust_link(struct net_device *dev)
 				tempval &= ~(MACCFG2_FULL_DUPLEX);
 				gfar_write(&regs->maccfg2, tempval);
 
-				printk(KERN_INFO "%s: Half Duplex\n",
-						dev->name);
+				if (netif_msg_link(priv))
+					printk(KERN_INFO "%s: Half Duplex\n",
+							dev->name);
 			} else {
 				tempval = gfar_read(&regs->maccfg2);
 				tempval |= MACCFG2_FULL_DUPLEX;
 				gfar_write(&regs->maccfg2, tempval);
 
-				printk(KERN_INFO "%s: Full Duplex\n",
-						dev->name);
+				if (netif_msg_link(priv))
+					printk(KERN_INFO "%s: Full Duplex\n",
+							dev->name);
 			}
 
 			priv->oldduplex = mii_info->duplex;
@@ -1622,27 +1894,32 @@ static void adjust_link(struct net_device *dev)
 				gfar_write(&regs->maccfg2, tempval);
 				break;
 			default:
-				printk(KERN_WARNING
-					"%s: Ack! Speed (%d) is not 10/100/1000!\n",
-					dev->name, mii_info->speed);
+				if (netif_msg_link(priv))
+					printk(KERN_WARNING
+						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
+						dev->name, mii_info->speed);
 				break;
 			}
 
-			printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
-					mii_info->speed);
+			if (netif_msg_link(priv))
+				printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
+						mii_info->speed);
 
 			priv->oldspeed = mii_info->speed;
 		}
 
 		if (!priv->oldlink) {
-			printk(KERN_INFO "%s: Link is up\n", dev->name);
+			if (netif_msg_link(priv))
+				printk(KERN_INFO "%s: Link is up\n", dev->name);
 			priv->oldlink = 1;
 			netif_carrier_on(dev);
 			netif_schedule(dev);
 		}
 	} else {
 		if (priv->oldlink) {
-			printk(KERN_INFO "%s: Link is down\n", dev->name);
+			if (netif_msg_link(priv))
+				printk(KERN_INFO "%s: Link is down\n",
+						dev->name);
 			priv->oldlink = 0;
 			priv->oldspeed = 0;
 			priv->oldduplex = -1;
@@ -1664,8 +1941,9 @@ static void gfar_set_multi(struct net_device *dev)
 	u32 tempval;
 
 	if(dev->flags & IFF_PROMISC) {
-		printk(KERN_INFO "%s: Entering promiscuous mode.\n",
-				dev->name);
+		if (netif_msg_drv(priv))
+			printk(KERN_INFO "%s: Entering promiscuous mode.\n",
+					dev->name);
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -1679,6 +1957,14 @@ static void gfar_set_multi(struct net_device *dev)
 
 		if(dev->flags & IFF_ALLMULTI) {
 			/* Set the hash to rx all multicast frames */
+			gfar_write(&regs->igaddr0, 0xffffffff);
+			gfar_write(&regs->igaddr1, 0xffffffff);
+			gfar_write(&regs->igaddr2, 0xffffffff);
+			gfar_write(&regs->igaddr3, 0xffffffff);
+			gfar_write(&regs->igaddr4, 0xffffffff);
+			gfar_write(&regs->igaddr5, 0xffffffff);
+			gfar_write(&regs->igaddr6, 0xffffffff);
+			gfar_write(&regs->igaddr7, 0xffffffff);
 			gfar_write(&regs->gaddr0, 0xffffffff);
 			gfar_write(&regs->gaddr1, 0xffffffff);
 			gfar_write(&regs->gaddr2, 0xffffffff);
@@ -1689,6 +1975,14 @@ static void gfar_set_multi(struct net_device *dev)
 			gfar_write(&regs->gaddr7, 0xffffffff);
 		} else {
 			/* zero out the hash */
+			gfar_write(&regs->igaddr0, 0x0);
+			gfar_write(&regs->igaddr1, 0x0);
+			gfar_write(&regs->igaddr2, 0x0);
+			gfar_write(&regs->igaddr3, 0x0);
+			gfar_write(&regs->igaddr4, 0x0);
+			gfar_write(&regs->igaddr5, 0x0);
+			gfar_write(&regs->igaddr6, 0x0);
+			gfar_write(&regs->igaddr7, 0x0);
 			gfar_write(&regs->gaddr0, 0x0);
 			gfar_write(&regs->gaddr1, 0x0);
 			gfar_write(&regs->gaddr2, 0x0);
@@ -1727,16 +2021,15 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 {
 	u32 tempval;
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar *regs = priv->regs;
-	u32 *hash = &regs->gaddr0;
 	u32 result = ether_crc(MAC_ADDR_LEN, addr);
-	u8 whichreg = ((result >> 29) & 0x7);
-	u8 whichbit = ((result >> 24) & 0x1f);
+	int width = priv->hash_width;
+	u8 whichbit = (result >> (32 - width)) & 0x1f;
+	u8 whichreg = result >> (32 - width + 5);
 	u32 value = (1 << (31-whichbit));
 
-	tempval = gfar_read(&hash[whichreg]);
+	tempval = gfar_read(priv->hash_regs[whichreg]);
 	tempval |= value;
-	gfar_write(&hash[whichreg], tempval);
+	gfar_write(priv->hash_regs[whichreg], tempval);
 
 	return;
 }
@@ -1754,10 +2047,9 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
 	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
 
 	/* Hmm... */
-#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
-	printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-	       dev->name, events, gfar_read(&priv->regs->imask));
-#endif
+	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
+		       dev->name, events, gfar_read(&priv->regs->imask));
 
 	/* Update the error counters */
 	if (events & IEVENT_TXE) {
@@ -1768,19 +2060,17 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
 		if (events & IEVENT_CRL)
 			priv->stats.tx_aborted_errors++;
 		if (events & IEVENT_XFUN) {
-#ifdef VERBOSE_GFAR_ERRORS
-			printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
-					dev->name);
-#endif
+			if (netif_msg_tx_err(priv))
+				printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
+						dev->name);
 			priv->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;
 
 			/* Reactivate the Tx Queues */
 			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 		}
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
-#endif
+		if (netif_msg_tx_err(priv))
+			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
 	}
 	if (events & IEVENT_BSY) {
 		priv->stats.rx_errors++;
@@ -1793,35 +2083,31 @@ static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
 		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
 #endif
 
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
-				gfar_read(&priv->regs->rstat));
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
+					dev->name,
+					gfar_read(&priv->regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		priv->stats.rx_errors++;
 		priv->extra_stats.rx_babr++;
 
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
 	}
 	if (events & IEVENT_EBERR) {
 		priv->extra_stats.eberr++;
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
-#endif
+		if (netif_msg_rx_err(priv))
+			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
 	}
-	if (events & IEVENT_RXC)
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: control frame\n", dev->name);
-#endif
+	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
+		if (netif_msg_rx_status(priv))
+			printk(KERN_DEBUG "%s: control frame\n", dev->name);
 
 	if (events & IEVENT_BABT) {
 		priv->extra_stats.tx_babt++;
-#ifdef VERBOSE_GFAR_ERRORS
-		printk(KERN_DEBUG "%s: babt error\n", dev->name);
-#endif
+		if (netif_msg_tx_err(priv))
+			printk(KERN_DEBUG "%s: babt error\n", dev->name);
 	}
 	return IRQ_HANDLED;
 }
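
A note on the hash arithmetic the patch introduces in gfar_set_hash_for_addr(): the top hash_width bits of the CRC of the MAC address select one bit out of the multicast filter, with hash_width = 9 spanning the sixteen igaddr0-7/gaddr0-7 registers and hash_width = 8 spanning only gaddr0-7. The sketch below is an illustrative, standalone rework of that index calculation, not part of the patch; pick_hash_slot() and the crc value are hypothetical stand-ins for what ether_crc(MAC_ADDR_LEN, addr) would produce in the driver.

```c
/* Illustrative sketch only -- mirrors the register/bit selection that
 * the patched gfar_set_hash_for_addr() performs, under the assumption
 * that "crc" is the 32-bit CRC of the multicast MAC address. */
#include <stdint.h>
#include <stdio.h>

struct hash_slot {
	unsigned reg;	/* which 32-bit group-hash register */
	uint32_t mask;	/* bit to OR into that register */
};

static struct hash_slot pick_hash_slot(uint32_t crc, unsigned hash_width)
{
	struct hash_slot slot;
	/* low 5 of the top hash_width bits pick the bit in the register */
	uint8_t whichbit = (crc >> (32 - hash_width)) & 0x1f;
	/* remaining top bits pick the register (0..15 or 0..7) */
	uint8_t whichreg = crc >> (32 - hash_width + 5);

	slot.reg = whichreg;
	slot.mask = 1u << (31 - whichbit);
	return slot;
}

int main(void)
{
	uint32_t crc = 0xdeadbeef;	/* hypothetical CRC value */
	struct hash_slot wide = pick_hash_slot(crc, 9);		/* extended hash */
	struct hash_slot narrow = pick_hash_slot(crc, 8);	/* gaddr-only hash */

	printf("width 9: reg %u, mask 0x%08x\n", wide.reg, wide.mask);
	printf("width 8: reg %u, mask 0x%08x\n", narrow.reg, narrow.mask);
	return 0;
}
```

In the driver itself, the chosen register is then read-modify-written through priv->hash_regs[whichreg], which is why the probe-time code above fills that table differently depending on FSL_GIANFAR_DEV_HAS_EXTENDED_HASH.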