author     Jan Ceuleers <jan.ceuleers@computer.org>    2012-06-04 23:42:12 -0400
committer  David S. Miller <davem@davemloft.net>       2012-06-05 18:38:40 -0400
commit     0977f817df4d0f629952b4c31d650640188b4e45 (patch)
tree       7120da68c030d306300ea9bfb42d6ec772a97a62 /drivers/net/ethernet/freescale
parent     2281a0f3346ae891e3c2216ad05359e0b7934bf0 (diff)
gianfar: comment cleanup
Signed-off-by: Jan Ceuleers <jan.ceuleers@computer.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 155
1 file changed, 83 insertions(+), 72 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5ca7b9eaa84a..314456000335 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
-/*
- * drivers/net/ethernet/freescale/gianfar.c
+/* drivers/net/ethernet/freescale/gianfar.c
  *
  * Gianfar Ethernet Driver
  * This driver is designed for the non-CPM ethernet controllers
@@ -405,7 +404,8 @@ static void gfar_init_mac(struct net_device *ndev)
         gfar_write(&regs->attreli, attrs);
 
         /* Start with defaults, and add stashing or locking
-         * depending on the approprate variables */
+         * depending on the approprate variables
+         */
         attrs = ATTR_INIT_SETTINGS;
 
         if (priv->bd_stash_en)
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
         priv->num_rx_queues = num_rx_qs;
         priv->num_grps = 0x0;
 
-        /* Init Rx queue filer rule set linked list*/
+        /* Init Rx queue filer rule set linked list */
         INIT_LIST_HEAD(&priv->rx_list.list);
         priv->rx_list.count = 0;
         mutex_init(&priv->rx_queue_access);
@@ -960,7 +960,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
 }
 
 /* Set up the ethernet device structure, private data,
- * and anything else we need before we start */
+ * and anything else we need before we start
+ */
 static int gfar_probe(struct platform_device *ofdev)
 {
         u32 tempval;
@@ -991,8 +992,9 @@ static int gfar_probe(struct platform_device *ofdev)
 
         gfar_detect_errata(priv);
 
-        /* Stop the DMA engine now, in case it was running before */
-        /* (The firmware could have used it, and left it running). */
+        /* Stop the DMA engine now, in case it was running before
+         * (The firmware could have used it, and left it running).
+         */
         gfar_halt(dev);
 
         /* Reset MAC layer */
@@ -1098,7 +1100,8 @@ static int gfar_probe(struct platform_device *ofdev)
 
         /* Need to reverse the bit maps as bit_map's MSB is q0
          * but, for_each_set_bit parses from right to left, which
-         * basically reverses the queue numbers */
+         * basically reverses the queue numbers
+         */
         for (i = 0; i< priv->num_grps; i++) {
                 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
                                 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
@@ -1107,7 +1110,8 @@ static int gfar_probe(struct platform_device *ofdev)
         }
 
         /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
-         * also assign queues to groups */
+         * also assign queues to groups
+         */
         for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
                 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
@@ -1149,7 +1153,7 @@ static int gfar_probe(struct platform_device *ofdev)
                 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
         }
 
-        /* always enable rx filer*/
+        /* always enable rx filer */
         priv->rx_filer_enable = 1;
         /* Enable most messages by default */
         priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1189,8 +1193,9 @@ static int gfar_probe(struct platform_device *ofdev)
         /* Print out the device info */
         netdev_info(dev, "mac: %pM\n", dev->dev_addr);
 
-        /* Even more device info helps when determining which kernel */
-        /* provided which set of benchmarks. */
+        /* Even more device info helps when determining which kernel
+         * provided which set of benchmarks.
+         */
         netdev_info(dev, "Running with NAPI enabled\n");
         for (i = 0; i < priv->num_rx_queues; i++)
                 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
@@ -1398,8 +1403,7 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
         else {
                 phy_interface_t interface = priv->interface;
 
-                /*
-                 * This isn't autodetected right now, so it must
+                /* This isn't autodetected right now, so it must
                  * be set by the device tree or platform code.
                  */
                 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1457,7 @@ static int init_phy(struct net_device *dev)
         return 0;
 }
 
-/*
- * Initialize TBI PHY interface for communicating with the
+/* Initialize TBI PHY interface for communicating with the
  * SERDES lynx PHY on the chip. We communicate with this PHY
  * through the MDIO bus on each controller, treating it as a
  * "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1482,7 @@ static void gfar_configure_serdes(struct net_device *dev)
                 return;
         }
 
-        /*
-         * If the link is already up, we must already be ok, and don't need to
+        /* If the link is already up, we must already be ok, and don't need to
          * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
          * everything for us? Resetting it takes the link down and requires
          * several seconds for it to come back.
@@ -1554,15 +1556,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
 {
         u32 res;
 
-        /*
-         * Normaly TSEC should not hang on GRS commands, so we should
+        /* Normaly TSEC should not hang on GRS commands, so we should
          * actually wait for IEVENT_GRSC flag.
          */
         if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
                 return 0;
 
-        /*
-         * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
          * the same as bits 23-30, the eTSEC Rx is assumed to be idle
          * and the Rx can be safely reset.
          */
@@ -1718,7 +1718,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
+ * Then free tx_skbuff and rx_skbuff
+ */
 static void free_skb_resources(struct gfar_private *priv)
 {
         struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1827,10 +1828,12 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
         int err;
 
         /* If the device has multiple interrupts, register for
-         * them. Otherwise, only register for the one */
+         * them. Otherwise, only register for the one
+         */
         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                 /* Install our interrupt handlers for Error,
-                 * Transmit, and Receive */
+                 * Transmit, and Receive
+                 */
                 if ((err = request_irq(grp->interruptError, gfar_error, 0,
                                 grp->int_name_er,grp)) < 0) {
                         netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1914,8 +1917,9 @@ irq_fail:
         return err;
 }
 
-/* Called when something needs to use the ethernet device */
-/* Returns 0 for success. */
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
 static int gfar_enet_open(struct net_device *dev)
 {
         struct gfar_private *priv = netdev_priv(dev);
@@ -1970,8 +1974,9 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
          */
         flags = TXFCB_DEFAULT;
 
-        /* Tell the controller what the protocol is */
-        /* And provide the already calculated phcs */
+        /* Tell the controller what the protocol is
+         * And provide the already calculated phcs
+         */
         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                 flags |= TXFCB_UDP;
                 fcb->phcs = udp_hdr(skb)->check;
@@ -1981,7 +1986,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
         /* l3os is the distance between the start of the
          * frame (skb->data) and the start of the IP hdr.
          * l4os is the distance between the start of the
-         * l3 hdr and the l4 hdr */
+         * l3 hdr and the l4 hdr
+         */
         fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
         fcb->l4os = skb_network_header_len(skb);
 
@@ -2008,8 +2014,9 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
         return skip_txbd(bdp, 1, base, ring_size);
 }
 
-/* This is called by the kernel when a frame is ready for transmission. */
-/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct gfar_private *priv = netdev_priv(dev);
@@ -2024,8 +2031,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         unsigned long flags;
         unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
 
-        /*
-         * TOE=1 frames larger than 2500 bytes may see excess delays
+        /* TOE=1 frames larger than 2500 bytes may see excess delays
          * before start of transmission.
          */
         if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
@@ -2177,8 +2183,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                         skb_headlen(skb), DMA_TO_DEVICE);
 
-        /*
-         * If time stamping is requested one additional TxBD must be set up. The
+        /* If time stamping is requested one additional TxBD must be set up. The
          * first TxBD points to the FCB and must have a data length of
          * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
          * the full frame length.
@@ -2194,8 +2199,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         netdev_tx_sent_queue(txq, skb->len);
 
-        /*
-         * We can work in parallel with gfar_clean_tx_ring(), except
+        /* We can work in parallel with gfar_clean_tx_ring(), except
          * when modifying num_txbdfree. Note that we didn't grab the lock
          * when we were reading the num_txbdfree and checking for available
          * space, that's because outside of this function it can only grow,
@@ -2208,8 +2212,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
          */
         spin_lock_irqsave(&tx_queue->txlock, flags);
 
-        /*
-         * The powerpc-specific eieio() is used, as wmb() has too strong
+        /* The powerpc-specific eieio() is used, as wmb() has too strong
          * semantics (it requires synchronization between cacheable and
          * uncacheable mappings, which eieio doesn't provide and which we
          * don't need), thus requiring a more expensive sync instruction. At
@@ -2225,7 +2228,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 
         /* Update the current skb pointer to the next entry we will use
-         * (wrapping if necessary) */
+         * (wrapping if necessary)
+         */
         tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
                 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
@@ -2235,7 +2239,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_queue->num_txbdfree -= (nr_txbds);
 
         /* If the next BD still needs to be cleaned up, then the bds
-           are full. We need to tell the kernel to stop sending us stuff. */
+         * are full. We need to tell the kernel to stop sending us stuff.
+         */
         if (!tx_queue->num_txbdfree) {
                 netif_tx_stop_queue(txq);
 
@@ -2365,7 +2370,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
                            INCREMENTAL_BUFFER_SIZE;
 
         /* Only stop and start the controller if it isn't already
-         * stopped, and we changed something */
+         * stopped, and we changed something
+         */
         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                 stop_gfar(dev);
 
@@ -2378,7 +2384,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 
         /* If the mtu is larger than the max size for standard
          * ethernet frames (ie, a jumbo frame), then set maccfg2
-         * to allow huge frames, and to check the length */
+         * to allow huge frames, and to check the length
+         */
         tempval = gfar_read(&regs->maccfg2);
 
         if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
@@ -2464,8 +2471,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                 frags = skb_shinfo(skb)->nr_frags;
 
-                /*
-                 * When time stamping, one additional TxBD must be freed.
+                /* When time stamping, one additional TxBD must be freed.
                  * Also, we need to dma_unmap_single() the TxPAL.
                  */
                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2516,8 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                 bytes_sent += skb->len;
 
-                /*
-                 * If there's room in the queue (limit it to rx_buffer_size)
+                /* If there's room in the queue (limit it to rx_buffer_size)
                  * we add this skb back into the pool, if it's the right size
                  */
                 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
@@ -2561,8 +2566,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
                 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
                 __napi_schedule(&gfargrp->napi);
         } else {
-                /*
-                 * Clear IEVENT, so interrupts aren't called again
+                /* Clear IEVENT, so interrupts aren't called again
                  * because of the packets that have already arrived.
                  */
                 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2622,8 +2626,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
         struct net_device_stats *stats = &dev->stats;
         struct gfar_extra_stats *estats = &priv->extra_stats;
 
-        /* If the packet was truncated, none of the other errors
-         * matter */
+        /* If the packet was truncated, none of the other errors matter */
         if (status & RXBD_TRUNCATED) {
                 stats->rx_length_errors++;
 
@@ -2664,7 +2667,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
         /* If valid headers were found, and valid sums
          * were verified, then we tell the kernel that no
-         * checksumming is necessary. Otherwise, it is */
+         * checksumming is necessary. Otherwise, it is [FIXME]
+         */
         if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
         else
@@ -2672,8 +2676,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }
 
 
-/* gfar_process_frame() -- handle one incoming packet if skb
- * isn't NULL. */
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                               int amount_pull, struct napi_struct *napi)
 {
@@ -2685,8 +2688,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
         /* fcb is at the beginning if exists */
         fcb = (struct rxfcb *)skb->data;
 
-        /* Remove the FCB from the skb */
-        /* Remove the padded bytes, if there are any */
+        /* Remove the FCB from the skb
+         * Remove the padded bytes, if there are any
+         */
         if (amount_pull) {
                 skb_record_rx_queue(skb, fcb->rq);
                 skb_pull(skb, amount_pull);
@@ -2709,8 +2713,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
         /* Tell the skb what kind of packet this is */
         skb->protocol = eth_type_trans(skb, dev);
 
-        /*
-         * There's need to check for NETIF_F_HW_VLAN_RX here.
+        /* There's need to check for NETIF_F_HW_VLAN_RX here.
          * Even if vlan rx accel is disabled, on some chips
          * RXFCB_VLN is pseudo randomly set.
          */
@@ -2831,7 +2834,8 @@ static int gfar_poll(struct napi_struct *napi, int budget)
         budget_per_queue = budget/num_queues;
 
         /* Clear IEVENT, so interrupts aren't called again
-         * because of the packets that have already arrived */
+         * because of the packets that have already arrived
+         */
         gfar_write(&regs->ievent, IEVENT_RTX_MASK);
 
         while (num_queues && left_over_budget) {
@@ -2869,8 +2873,9 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
                 gfar_write(&regs->imask, IMASK_DEFAULT);
 
-                /* If we are coalescing interrupts, update the timer */
-                /* Otherwise, clear it */
+                /* If we are coalescing interrupts, update the timer
+                 * Otherwise, clear it
+                 */
                 gfar_configure_coalescing(priv,
                                 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
         }
@@ -2879,8 +2884,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
  * the interrupt routine is executing.
  */
@@ -2957,7 +2961,8 @@ static void adjust_link(struct net_device *dev)
                 u32 ecntrl = gfar_read(&regs->ecntrl);
 
                 /* Now we make sure that we can be in full duplex mode.
-                 * If not, we operate in half-duplex mode. */
+                 * If not, we operate in half-duplex mode.
+                 */
                 if (phydev->duplex != priv->oldduplex) {
                         new_state = 1;
                         if (!(phydev->duplex))
@@ -2983,7 +2988,8 @@ static void adjust_link(struct net_device *dev)
                                         ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
 
                                 /* Reduced mode distinguishes
-                                 * between 10 and 100 */
+                                 * between 10 and 100
+                                 */
                                 if (phydev->speed == SPEED_100)
                                         ecntrl |= ECNTRL_R100;
                                 else
@@ -3022,7 +3028,8 @@ static void adjust_link(struct net_device *dev)
 /* Update the hash table based on the current list of multicast
  * addresses we subscribe to. Also, change the promiscuity of
  * the device based on the flags (this function is called
- * whenever dev->flags is changed */
+ * whenever dev->flags is changed
+ */
 static void gfar_set_multi(struct net_device *dev)
 {
         struct netdev_hw_addr *ha;
@@ -3084,7 +3091,8 @@ static void gfar_set_multi(struct net_device *dev)
 
                 /* If we have extended hash tables, we need to
                  * clear the exact match registers to prepare for
-                 * setting them */
+                 * setting them
+                 */
                 if (priv->extended_hash) {
                         em_num = GFAR_EM_NUM + 1;
                         gfar_clear_exact_match(dev);
@@ -3110,7 +3118,8 @@ static void gfar_set_multi(struct net_device *dev)
 
 
 /* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception */
+ * don't interfere with normal reception
+ */
 static void gfar_clear_exact_match(struct net_device *dev)
 {
         int idx;
@@ -3132,7 +3141,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
  * hash index which gaddr register to use, and the 5 other bits
  * indicate which bit (assuming an IBM numbering scheme, which
  * for PowerPC (tm) is usually the case) in the register holds
- * the entry. */
+ * the entry.
+ */
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 {
         u32 tempval;
@@ -3164,8 +3174,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
         macptr += num*2;
 
-        /* Now copy it into the mac registers backwards, cuz */
-        /* little endian is silly */
+        /* Now copy it into the mac registers backwards, cuz
+         * little endian is silly
+         */
         for (idx = 0; idx < ETH_ALEN; idx++)
                 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
 