diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 14:47:58 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 14:47:58 -0400 |
commit | 6ba74014c1ab0e37af7de6f64b4eccbbae3cb9e7 (patch) | |
tree | 8f3892fc44f1e403675a6d7e88fda5c70e56ee4c /drivers/net/gianfar.c | |
parent | 5abd9ccced7a726c817dd6b5b96bc933859138d1 (diff) | |
parent | 3ff1c25927e3af61c6bf0e4ed959504058ae4565 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1443 commits)
phy/marvell: add 88ec048 support
igb: Program MDICNFG register prior to PHY init
e1000e: correct MAC-PHY interconnect register offset for 82579
hso: Add new product ID
can: Add driver for esd CAN-USB/2 device
l2tp: fix export of header file for userspace
can-raw: Fix skb_orphan_try handling
Revert "net: remove zap_completion_queue"
net: cleanup inclusion
phy/marvell: add 88e1121 interface mode support
u32: negative offset fix
net: Fix a typo from "dev" to "ndev"
igb: Use irq_synchronize per vector when using MSI-X
ixgbevf: fix null pointer dereference due to filter being set for VLAN 0
e1000e: Fix irq_synchronize in MSI-X case
e1000e: register pm_qos request on hardware activation
ip_fragment: fix subtracting PPPOE_SES_HLEN from mtu twice
net: Add getsockopt support for TCP thin-streams
cxgb4: update driver version
cxgb4: add new PCI IDs
...
Manually fix up conflicts in:
- drivers/net/e1000e/netdev.c: due to pm_qos registration
infrastructure changes
- drivers/net/phy/marvell.c: conflict between adding 88ec048 support
and cleaning up the IDs
- drivers/net/wireless/ipw2x00/ipw2100.c: trivial ipw2100_pm_qos_req
conflict (registration change vs marking it static)
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 152 |
1 file changed, 115 insertions, 37 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 28b53d1cd4f1..27f02970d898 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -85,6 +85,7 @@ | |||
85 | #include <linux/net_tstamp.h> | 85 | #include <linux/net_tstamp.h> |
86 | 86 | ||
87 | #include <asm/io.h> | 87 | #include <asm/io.h> |
88 | #include <asm/reg.h> | ||
88 | #include <asm/irq.h> | 89 | #include <asm/irq.h> |
89 | #include <asm/uaccess.h> | 90 | #include <asm/uaccess.h> |
90 | #include <linux/module.h> | 91 | #include <linux/module.h> |
@@ -685,8 +686,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
685 | priv->rx_queue[i] = NULL; | 686 | priv->rx_queue[i] = NULL; |
686 | 687 | ||
687 | for (i = 0; i < priv->num_tx_queues; i++) { | 688 | for (i = 0; i < priv->num_tx_queues; i++) { |
688 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( | 689 | priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), |
689 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | 690 | GFP_KERNEL); |
690 | if (!priv->tx_queue[i]) { | 691 | if (!priv->tx_queue[i]) { |
691 | err = -ENOMEM; | 692 | err = -ENOMEM; |
692 | goto tx_alloc_failed; | 693 | goto tx_alloc_failed; |
@@ -698,8 +699,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
698 | } | 699 | } |
699 | 700 | ||
700 | for (i = 0; i < priv->num_rx_queues; i++) { | 701 | for (i = 0; i < priv->num_rx_queues; i++) { |
701 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( | 702 | priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), |
702 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | 703 | GFP_KERNEL); |
703 | if (!priv->rx_queue[i]) { | 704 | if (!priv->rx_queue[i]) { |
704 | err = -ENOMEM; | 705 | err = -ENOMEM; |
705 | goto rx_alloc_failed; | 706 | goto rx_alloc_failed; |
@@ -846,7 +847,7 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
846 | if (!priv->phydev) | 847 | if (!priv->phydev) |
847 | return -ENODEV; | 848 | return -ENODEV; |
848 | 849 | ||
849 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | 850 | return phy_mii_ioctl(priv->phydev, rq, cmd); |
850 | } | 851 | } |
851 | 852 | ||
852 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) | 853 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) |
@@ -928,6 +929,34 @@ static void gfar_init_filer_table(struct gfar_private *priv) | |||
928 | } | 929 | } |
929 | } | 930 | } |
930 | 931 | ||
932 | static void gfar_detect_errata(struct gfar_private *priv) | ||
933 | { | ||
934 | struct device *dev = &priv->ofdev->dev; | ||
935 | unsigned int pvr = mfspr(SPRN_PVR); | ||
936 | unsigned int svr = mfspr(SPRN_SVR); | ||
937 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ | ||
938 | unsigned int rev = svr & 0xffff; | ||
939 | |||
940 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ | ||
941 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || | ||
942 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
943 | priv->errata |= GFAR_ERRATA_74; | ||
944 | |||
945 | /* MPC8313 and MPC837x all rev */ | ||
946 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
947 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
948 | priv->errata |= GFAR_ERRATA_76; | ||
949 | |||
950 | /* MPC8313 and MPC837x all rev */ | ||
951 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
952 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
953 | priv->errata |= GFAR_ERRATA_A002; | ||
954 | |||
955 | if (priv->errata) | ||
956 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", | ||
957 | priv->errata); | ||
958 | } | ||
959 | |||
931 | /* Set up the ethernet device structure, private data, | 960 | /* Set up the ethernet device structure, private data, |
932 | * and anything else we need before we start */ | 961 | * and anything else we need before we start */ |
933 | static int gfar_probe(struct of_device *ofdev, | 962 | static int gfar_probe(struct of_device *ofdev, |
@@ -960,6 +989,8 @@ static int gfar_probe(struct of_device *ofdev, | |||
960 | dev_set_drvdata(&ofdev->dev, priv); | 989 | dev_set_drvdata(&ofdev->dev, priv); |
961 | regs = priv->gfargrp[0].regs; | 990 | regs = priv->gfargrp[0].regs; |
962 | 991 | ||
992 | gfar_detect_errata(priv); | ||
993 | |||
963 | /* Stop the DMA engine now, in case it was running before */ | 994 | /* Stop the DMA engine now, in case it was running before */ |
964 | /* (The firmware could have used it, and left it running). */ | 995 | /* (The firmware could have used it, and left it running). */ |
965 | gfar_halt(dev); | 996 | gfar_halt(dev); |
@@ -974,7 +1005,10 @@ static int gfar_probe(struct of_device *ofdev, | |||
974 | gfar_write(®s->maccfg1, tempval); | 1005 | gfar_write(®s->maccfg1, tempval); |
975 | 1006 | ||
976 | /* Initialize MACCFG2. */ | 1007 | /* Initialize MACCFG2. */ |
977 | gfar_write(®s->maccfg2, MACCFG2_INIT_SETTINGS); | 1008 | tempval = MACCFG2_INIT_SETTINGS; |
1009 | if (gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
1010 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; | ||
1011 | gfar_write(®s->maccfg2, tempval); | ||
978 | 1012 | ||
979 | /* Initialize ECNTRL */ | 1013 | /* Initialize ECNTRL */ |
980 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); | 1014 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); |
@@ -1541,6 +1575,29 @@ static void init_registers(struct net_device *dev) | |||
1541 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); | 1575 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); |
1542 | } | 1576 | } |
1543 | 1577 | ||
1578 | static int __gfar_is_rx_idle(struct gfar_private *priv) | ||
1579 | { | ||
1580 | u32 res; | ||
1581 | |||
1582 | /* | ||
1583 | * Normally TSEC should not hang on GRS commands, so we should | ||
1584 | * actually wait for IEVENT_GRSC flag. | ||
1585 | */ | ||
1586 | if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) | ||
1587 | return 0; | ||
1588 | |||
1589 | /* | ||
1590 | * Read the eTSEC register at offset 0xD1C. If bits 7-14 are | ||
1591 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle | ||
1592 | * and the Rx can be safely reset. | ||
1593 | */ | ||
1594 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | ||
1595 | res &= 0x7f807f80; | ||
1596 | if ((res & 0xffff) == (res >> 16)) | ||
1597 | return 1; | ||
1598 | |||
1599 | return 0; | ||
1600 | } | ||
1544 | 1601 | ||
1545 | /* Halt the receive and transmit queues */ | 1602 | /* Halt the receive and transmit queues */ |
1546 | static void gfar_halt_nodisable(struct net_device *dev) | 1603 | static void gfar_halt_nodisable(struct net_device *dev) |
@@ -1564,12 +1621,18 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
1564 | tempval = gfar_read(®s->dmactrl); | 1621 | tempval = gfar_read(®s->dmactrl); |
1565 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) | 1622 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
1566 | != (DMACTRL_GRS | DMACTRL_GTS)) { | 1623 | != (DMACTRL_GRS | DMACTRL_GTS)) { |
1624 | int ret; | ||
1625 | |||
1567 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | 1626 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1568 | gfar_write(®s->dmactrl, tempval); | 1627 | gfar_write(®s->dmactrl, tempval); |
1569 | 1628 | ||
1570 | spin_event_timeout(((gfar_read(®s->ievent) & | 1629 | do { |
1571 | (IEVENT_GRSC | IEVENT_GTSC)) == | 1630 | ret = spin_event_timeout(((gfar_read(®s->ievent) & |
1572 | (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); | 1631 | (IEVENT_GRSC | IEVENT_GTSC)) == |
1632 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | ||
1633 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) | ||
1634 | ret = __gfar_is_rx_idle(priv); | ||
1635 | } while (!ret); | ||
1573 | } | 1636 | } |
1574 | } | 1637 | } |
1575 | 1638 | ||
@@ -1987,6 +2050,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1987 | unsigned int nr_frags, nr_txbds, length; | 2050 | unsigned int nr_frags, nr_txbds, length; |
1988 | union skb_shared_tx *shtx; | 2051 | union skb_shared_tx *shtx; |
1989 | 2052 | ||
2053 | /* | ||
2054 | * TOE=1 frames larger than 2500 bytes may see excess delays | ||
2055 | * before start of transmission. | ||
2056 | */ | ||
2057 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && | ||
2058 | skb->ip_summed == CHECKSUM_PARTIAL && | ||
2059 | skb->len > 2500)) { | ||
2060 | int ret; | ||
2061 | |||
2062 | ret = skb_checksum_help(skb); | ||
2063 | if (ret) | ||
2064 | return ret; | ||
2065 | } | ||
2066 | |||
1990 | rq = skb->queue_mapping; | 2067 | rq = skb->queue_mapping; |
1991 | tx_queue = priv->tx_queue[rq]; | 2068 | tx_queue = priv->tx_queue[rq]; |
1992 | txq = netdev_get_tx_queue(dev, rq); | 2069 | txq = netdev_get_tx_queue(dev, rq); |
@@ -2300,7 +2377,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) | |||
2300 | * to allow huge frames, and to check the length */ | 2377 | * to allow huge frames, and to check the length */ |
2301 | tempval = gfar_read(®s->maccfg2); | 2378 | tempval = gfar_read(®s->maccfg2); |
2302 | 2379 | ||
2303 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) | 2380 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || |
2381 | gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
2304 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | 2382 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); |
2305 | else | 2383 | else |
2306 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | 2384 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); |
@@ -2342,6 +2420,15 @@ static void gfar_timeout(struct net_device *dev) | |||
2342 | schedule_work(&priv->reset_task); | 2420 | schedule_work(&priv->reset_task); |
2343 | } | 2421 | } |
2344 | 2422 | ||
2423 | static void gfar_align_skb(struct sk_buff *skb) | ||
2424 | { | ||
2425 | /* We need the data buffer to be aligned properly. We will reserve | ||
2426 | * as many bytes as needed to align the data properly | ||
2427 | */ | ||
2428 | skb_reserve(skb, RXBUF_ALIGNMENT - | ||
2429 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); | ||
2430 | } | ||
2431 | |||
2345 | /* Interrupt Handler for Transmit complete */ | 2432 | /* Interrupt Handler for Transmit complete */ |
2346 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | 2433 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
2347 | { | 2434 | { |
@@ -2426,9 +2513,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
2426 | */ | 2513 | */ |
2427 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && | 2514 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
2428 | skb_recycle_check(skb, priv->rx_buffer_size + | 2515 | skb_recycle_check(skb, priv->rx_buffer_size + |
2429 | RXBUF_ALIGNMENT)) | 2516 | RXBUF_ALIGNMENT)) { |
2517 | gfar_align_skb(skb); | ||
2430 | __skb_queue_head(&priv->rx_recycle, skb); | 2518 | __skb_queue_head(&priv->rx_recycle, skb); |
2431 | else | 2519 | } else |
2432 | dev_kfree_skb_any(skb); | 2520 | dev_kfree_skb_any(skb); |
2433 | 2521 | ||
2434 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; | 2522 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
@@ -2491,29 +2579,28 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | |||
2491 | gfar_init_rxbdp(rx_queue, bdp, buf); | 2579 | gfar_init_rxbdp(rx_queue, bdp, buf); |
2492 | } | 2580 | } |
2493 | 2581 | ||
2494 | 2582 | static struct sk_buff * gfar_alloc_skb(struct net_device *dev) | |
2495 | struct sk_buff * gfar_new_skb(struct net_device *dev) | ||
2496 | { | 2583 | { |
2497 | unsigned int alignamount; | ||
2498 | struct gfar_private *priv = netdev_priv(dev); | 2584 | struct gfar_private *priv = netdev_priv(dev); |
2499 | struct sk_buff *skb = NULL; | 2585 | struct sk_buff *skb = NULL; |
2500 | 2586 | ||
2501 | skb = __skb_dequeue(&priv->rx_recycle); | 2587 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
2502 | if (!skb) | ||
2503 | skb = netdev_alloc_skb(dev, | ||
2504 | priv->rx_buffer_size + RXBUF_ALIGNMENT); | ||
2505 | |||
2506 | if (!skb) | 2588 | if (!skb) |
2507 | return NULL; | 2589 | return NULL; |
2508 | 2590 | ||
2509 | alignamount = RXBUF_ALIGNMENT - | 2591 | gfar_align_skb(skb); |
2510 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); | ||
2511 | 2592 | ||
2512 | /* We need the data buffer to be aligned properly. We will reserve | 2593 | return skb; |
2513 | * as many bytes as needed to align the data properly | 2594 | } |
2514 | */ | 2595 | |
2515 | skb_reserve(skb, alignamount); | 2596 | struct sk_buff * gfar_new_skb(struct net_device *dev) |
2516 | GFAR_CB(skb)->alignamount = alignamount; | 2597 | { |
2598 | struct gfar_private *priv = netdev_priv(dev); | ||
2599 | struct sk_buff *skb = NULL; | ||
2600 | |||
2601 | skb = __skb_dequeue(&priv->rx_recycle); | ||
2602 | if (!skb) | ||
2603 | skb = gfar_alloc_skb(dev); | ||
2517 | 2604 | ||
2518 | return skb; | 2605 | return skb; |
2519 | } | 2606 | } |
@@ -2666,17 +2753,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
2666 | 2753 | ||
2667 | if (unlikely(!newskb)) | 2754 | if (unlikely(!newskb)) |
2668 | newskb = skb; | 2755 | newskb = skb; |
2669 | else if (skb) { | 2756 | else if (skb) |
2670 | /* | ||
2671 | * We need to un-reserve() the skb to what it | ||
2672 | * was before gfar_new_skb() re-aligned | ||
2673 | * it to an RXBUF_ALIGNMENT boundary | ||
2674 | * before we put the skb back on the | ||
2675 | * recycle list. | ||
2676 | */ | ||
2677 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); | ||
2678 | __skb_queue_head(&priv->rx_recycle, skb); | 2757 | __skb_queue_head(&priv->rx_recycle, skb); |
2679 | } | ||
2680 | } else { | 2758 | } else { |
2681 | /* Increment the number of packets */ | 2759 | /* Increment the number of packets */ |
2682 | rx_queue->stats.rx_packets++; | 2760 | rx_queue->stats.rx_packets++; |