author	Jan Ceuleers <jan.ceuleers@computer.org>	2012-06-04 23:42:13 -0400
committer	David S. Miller <davem@davemloft.net>	2012-06-05 18:38:40 -0400
commit	bc4598bc076fcafa662c82b8ad3ace2d1b5fbdc1 (patch)
tree	818d10f8170d4167218b45654551270ef4581b99 /drivers/net/ethernet/freescale
parent	0977f817df4d0f629952b4c31d650640188b4e45 (diff)
gianfar: various coding style and whitespace cleanups
Signed-off-by: Jan Ceuleers <jan.ceuleers@computer.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	298
1 file changed, 154 insertions(+), 144 deletions(-)
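The patch is purely mechanical style cleanup. As a hedged illustration (hypothetical code, not taken from gianfar.c), the sketch below shows the checkpatch conventions the diff applies throughout: a space after the if/for keywords, continuation lines aligned under the opening parenthesis, and a blank line separating local declarations from code.

	/*
	 * Illustrative sketch only -- example_style() is a made-up function,
	 * not part of this driver. It compiles standalone with a C99 compiler.
	 */
	#include <stdio.h>
	#include <strings.h>

	static int example_style(const char *model, unsigned int flags)
	{
		int i, matched = 0;

		/* was: for(i = 0;i < 3;i++) -- now a space after "for" */
		for (i = 0; i < 3; i++)
			matched += i;

		/* was: continuation indented by arbitrary tabs -- now the
		 * second line lines up under the opening parenthesis
		 */
		if (model && !strcasecmp(model, "eTSEC") &&
		    (flags & 0x1))
			matched++;

		return matched;
	}

	int main(void)
	{
		printf("%d\n", example_style("eTSEC", 0x1));
		return 0;
	}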
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 314456000335..ed0b1369a5d9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -113,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-		struct sk_buff *skb);
+			   struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -265,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		tx_queue->tx_bd_dma_base = addr;
 		tx_queue->dev = ndev;
 		/* enet DMA only understands physical addresses */
-		addr    += sizeof(struct txbd8) *tx_queue->tx_ring_size;
-		vaddr   += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 	}
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
@@ -275,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		rx_queue->rx_bd_base = vaddr;
 		rx_queue->rx_bd_dma_base = addr;
 		rx_queue->dev = ndev;
-		addr    += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-		vaddr   += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 	}
 
 	/* Setup the skbuff rings */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
 		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
-				tx_queue->tx_ring_size, GFP_KERNEL);
+					      tx_queue->tx_ring_size,
+					      GFP_KERNEL);
 		if (!tx_queue->tx_skbuff) {
 			netif_err(priv, ifup, ndev,
 				  "Could not allocate tx_skbuff\n");
@@ -297,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
 		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
-				rx_queue->rx_ring_size, GFP_KERNEL);
+					      rx_queue->rx_ring_size,
+					      GFP_KERNEL);
 
 		if (!rx_queue->rx_skbuff) {
 			netif_err(priv, ifup, ndev,
@@ -326,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
 	int i;
 
 	baddr = &regs->tbase0;
-	for(i = 0; i < priv->num_tx_queues; i++) {
+	for (i = 0; i < priv->num_tx_queues; i++) {
 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 		baddr += 2;
 	}
 
 	baddr = &regs->rbase0;
-	for(i = 0; i < priv->num_rx_queues; i++) {
+	for (i = 0; i < priv->num_rx_queues; i++) {
 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 		baddr += 2;
 	}
 }
 
@@ -430,12 +432,12 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
-		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
 	}
 
 	dev->stats.rx_packets = rx_packets;
-	dev->stats.rx_bytes = rx_bytes;
+	dev->stats.rx_bytes   = rx_bytes;
 	dev->stats.rx_dropped = rx_dropped;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
 
-	dev->stats.tx_bytes = tx_bytes;
+	dev->stats.tx_bytes   = tx_bytes;
 	dev->stats.tx_packets = tx_packets;
 
 	return &dev->stats;
@@ -508,8 +510,8 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
 	return gfar_is_vlan_on(priv) ||
-		(priv->ndev->features & NETIF_F_RXCSUM) ||
-		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+	       (priv->ndev->features & NETIF_F_RXCSUM) ||
+	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 }
 
 static void free_tx_pointers(struct gfar_private *priv)
@@ -554,7 +556,7 @@ static void enable_napi(struct gfar_private *priv)
 }
 
 static int gfar_parse_group(struct device_node *np,
-		struct gfar_private *priv, const char *model)
+			    struct gfar_private *priv, const char *model)
 {
 	u32 *queue_mask;
 
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
 	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
 	priv->gfargrp[priv->num_grps].priv = priv;
 	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
-	if(priv->mode == MQ_MG_MODE) {
-		queue_mask = (u32 *)of_get_property(np,
-				"fsl,rx-bit-map", NULL);
-		priv->gfargrp[priv->num_grps].rx_bit_map =
-			queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
-		queue_mask = (u32 *)of_get_property(np,
-				"fsl,tx-bit-map", NULL);
-		priv->gfargrp[priv->num_grps].tx_bit_map =
-			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+	if (priv->mode == MQ_MG_MODE) {
+		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
 	} else {
 		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
 		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	} else {
 		priv->mode = SQ_SG_MODE;
 		err = gfar_parse_group(np, priv, model);
-		if(err)
+		if (err)
 			goto err_grp_init;
 	}
 
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 
 	mac_addr = of_get_mac_address(np);
+
 	if (mac_addr)
 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
 	if (model && !strcasecmp(model, "TSEC"))
-		priv->device_flags =
-			FSL_GIANFAR_DEV_HAS_GIGABIT |
-			FSL_GIANFAR_DEV_HAS_COALESCE |
-			FSL_GIANFAR_DEV_HAS_RMON |
-			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+				     FSL_GIANFAR_DEV_HAS_COALESCE |
+				     FSL_GIANFAR_DEV_HAS_RMON |
+				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
 	if (model && !strcasecmp(model, "eTSEC"))
-		priv->device_flags =
-			FSL_GIANFAR_DEV_HAS_GIGABIT |
-			FSL_GIANFAR_DEV_HAS_COALESCE |
-			FSL_GIANFAR_DEV_HAS_RMON |
-			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
-			FSL_GIANFAR_DEV_HAS_PADDING |
-			FSL_GIANFAR_DEV_HAS_CSUM |
-			FSL_GIANFAR_DEV_HAS_VLAN |
-			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-			FSL_GIANFAR_DEV_HAS_TIMER;
+		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+				     FSL_GIANFAR_DEV_HAS_COALESCE |
+				     FSL_GIANFAR_DEV_HAS_RMON |
+				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+				     FSL_GIANFAR_DEV_HAS_PADDING |
+				     FSL_GIANFAR_DEV_HAS_CSUM |
+				     FSL_GIANFAR_DEV_HAS_VLAN |
+				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+				     FSL_GIANFAR_DEV_HAS_TIMER;
 
 	ctype = of_get_property(np, "phy-connection-type", NULL);
 
@@ -781,7 +781,7 @@ err_grp_init:
 }
 
 static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-			struct ifreq *ifr, int cmd)
+			       struct ifreq *ifr, int cmd)
 {
 	struct hwtstamp_config config;
 	struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
 {
 	unsigned int new_bit_map = 0x0;
 	int mask = 0x1 << (max_qs - 1), i;
+
 	for (i = 0; i < max_qs; i++) {
 		if (bit_map & mask)
 			new_bit_map = new_bit_map + (1 << i);
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
 
 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
-			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 		priv->errata |= GFAR_ERRATA_74;
 
 	/* MPC8313 and MPC837x all rev */
 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
-			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 		priv->errata |= GFAR_ERRATA_76;
 
 	/* MPC8313 and MPC837x all rev */
 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
-			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 		priv->errata |= GFAR_ERRATA_A002;
 
 	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
-			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
 		priv->errata |= GFAR_ERRATA_12;
 
 	if (priv->errata)
@@ -1028,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	/* Register for napi ...We are registering NAPI for each grp */
 	for (i = 0; i < priv->num_grps; i++)
-		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+			       GFAR_DEV_WEIGHT);
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
-			NETIF_F_RXCSUM;
+				   NETIF_F_RXCSUM;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
-			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1083,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
 	priv->padding = 0;
 
 	if (dev->features & NETIF_F_IP_CSUM ||
-			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 		dev->needed_headroom = GMAC_FCB_LEN;
 
 	/* Program the isrg regs only if number of grps > 1 */
@@ -1103,10 +1105,10 @@ static int gfar_probe(struct platform_device *ofdev)
 	 * basically reverses the queue numbers
 	 */
 	for (i = 0; i< priv->num_grps; i++) {
-		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
-				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
-		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
-				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+		priv->gfargrp[i].tx_bit_map =
+			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+		priv->gfargrp[i].rx_bit_map =
+			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
 	}
 
 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
@@ -1114,16 +1116,18 @@ static int gfar_probe(struct platform_device *ofdev)
 	 */
 	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
 		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
 		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
-				priv->num_rx_queues) {
+				 priv->num_rx_queues) {
 			priv->gfargrp[grp_idx].num_rx_queues++;
 			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
 			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
 			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
 		}
 		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
 		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
-				priv->num_tx_queues) {
+				 priv->num_tx_queues) {
 			priv->gfargrp[grp_idx].num_tx_queues++;
 			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
 			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1169,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
 	}
 
 	device_init_wakeup(&dev->dev,
-		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+			   priv->device_flags &
+			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	/* fill out IRQ number and name fields */
 	for (i = 0; i < priv->num_grps; i++) {
@@ -1200,7 +1205,7 @@ static int gfar_probe(struct platform_device *ofdev)
 	for (i = 0; i < priv->num_rx_queues; i++)
 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
 			    i, priv->rx_queue[i]->rx_ring_size);
-	for(i = 0; i < priv->num_tx_queues; i++)
+	for (i = 0; i < priv->num_tx_queues; i++)
 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
 			    i, priv->tx_queue[i]->tx_ring_size);
 
@@ -1247,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
 	u32 tempval;
 
 	int magic_packet = priv->wol_en &&
-		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+			   (priv->device_flags &
+			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	netif_device_detach(ndev);
 
@@ -1299,7 +1305,8 @@ static int gfar_resume(struct device *dev)
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
-		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+			   (priv->device_flags &
+			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	if (!netif_running(ndev)) {
 		netif_device_attach(ndev);
@@ -1398,8 +1405,9 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
 	}
 
 	if (ecntrl & ECNTRL_REDUCED_MODE) {
-		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
 			return PHY_INTERFACE_MODE_RMII;
+		}
 		else {
 			phy_interface_t interface = priv->interface;
 
@@ -1494,11 +1502,12 @@ static void gfar_configure_serdes(struct net_device *dev)
 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
 
 	phy_write(tbiphy, MII_ADVERTISE,
-			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
-			ADVERTISE_1000XPSE_ASYM);
+		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+		  ADVERTISE_1000XPSE_ASYM);
 
-	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
-			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+	phy_write(tbiphy, MII_BMCR,
+		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+		  BMCR_SPEED1000);
 }
 
 static void init_registers(struct net_device *dev)
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
 	regs = priv->gfargrp[0].regs;
 	/* Stop the DMA, and wait for it to stop */
 	tempval = gfar_read(&regs->dmactrl);
-	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
-	    != (DMACTRL_GRS | DMACTRL_GTS)) {
+	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+	    (DMACTRL_GRS | DMACTRL_GTS)) {
 		int ret;
 
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
 	} else {
 		for (i = 0; i < priv->num_grps; i++)
 			free_irq(priv->gfargrp[i].interruptTransmit,
-					&priv->gfargrp[i]);
+				 &priv->gfargrp[i]);
 	}
 
 	free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
-				txbdp->length, DMA_TO_DEVICE);
+				 txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
-				j++) {
+		     j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
-					txbdp->length, DMA_TO_DEVICE);
+				       txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
 		if (rx_queue->rx_skbuff[i]) {
 			dma_unmap_single(&priv->ofdev->dev,
-					rxbdp->bufPtr, priv->rx_buffer_size,
-					DMA_FROM_DEVICE);
+					 rxbdp->bufPtr, priv->rx_buffer_size,
+					 DMA_FROM_DEVICE);
 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
 			rx_queue->rx_skbuff[i] = NULL;
 		}
@@ -1729,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
 	/* Go through all the buffer descriptors and free their data buffers */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		struct netdev_queue *txq;
+
 		tx_queue = priv->tx_queue[i];
 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
-		if(tx_queue->tx_skbuff)
+		if (tx_queue->tx_skbuff)
 			free_skb_tx_queue(tx_queue);
 		netdev_tx_reset_queue(txq);
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if(rx_queue->rx_skbuff)
+		if (rx_queue->rx_skbuff)
 			free_skb_rx_queue(rx_queue);
 	}
 
 	dma_free_coherent(&priv->ofdev->dev,
-			sizeof(struct txbd8) * priv->total_tx_ring_size +
-			sizeof(struct rxbd8) * priv->total_rx_ring_size,
-			priv->tx_queue[0]->tx_bd_base,
-			priv->tx_queue[0]->tx_bd_dma_base);
+			  sizeof(struct txbd8) * priv->total_tx_ring_size +
+			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			  priv->tx_queue[0]->tx_bd_base,
+			  priv->tx_queue[0]->tx_bd_dma_base);
 	skb_queue_purge(&priv->rx_recycle);
 }
 
@@ -1785,7 +1795,7 @@ void gfar_start(struct net_device *dev)
 }
 
 void gfar_configure_coalescing(struct gfar_private *priv,
-	unsigned long tx_mask, unsigned long rx_mask)
+			       unsigned long tx_mask, unsigned long rx_mask)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 __iomem *baddr;
@@ -1795,11 +1805,11 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 	 * multiple queues, there's only single reg to program
 	 */
 	gfar_write(&regs->txic, 0);
-	if(likely(priv->tx_queue[0]->txcoalescing))
+	if (likely(priv->tx_queue[0]->txcoalescing))
 		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
 	gfar_write(&regs->rxic, 0);
-	if(unlikely(priv->rx_queue[0]->rxcoalescing))
+	if (unlikely(priv->rx_queue[0]->rxcoalescing))
 		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 
 	if (priv->mode == MQ_MG_MODE) {
@@ -1834,8 +1844,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
 		/* Install our interrupt handlers for Error,
 		 * Transmit, and Receive
 		 */
-		if ((err = request_irq(grp->interruptError, gfar_error, 0,
-				grp->int_name_er,grp)) < 0) {
+		if ((err = request_irq(grp->interruptError, gfar_error,
+				       0, grp->int_name_er, grp)) < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  grp->interruptError);
 
@@ -1843,21 +1853,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
 		}
 
 		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
-				0, grp->int_name_tx, grp)) < 0) {
+				       0, grp->int_name_tx, grp)) < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  grp->interruptTransmit);
 			goto tx_irq_fail;
 		}
 
-		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
-				grp->int_name_rx, grp)) < 0) {
+		if ((err = request_irq(grp->interruptReceive, gfar_receive,
+				       0, grp->int_name_rx, grp)) < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  grp->interruptReceive);
 			goto rx_irq_fail;
 		}
 	} else {
-		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
-				grp->int_name_tx, grp)) < 0) {
+		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+				       0, grp->int_name_tx, grp)) < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  grp->interruptTransmit);
 			goto err_irq_fail;
@@ -1964,7 +1974,7 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
 }
 
 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
-		int fcb_length)
+				    int fcb_length)
 {
 	u8 flags = 0;
 
@@ -2001,7 +2011,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
-			struct txbd8 *base, int ring_size)
+				      struct txbd8 *base, int ring_size)
 {
 	struct txbd8 *new_bd = bdp + stride;
 
@@ -2009,7 +2019,7 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
 }
 
 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
-		int ring_size)
+				      int ring_size)
 {
 	return skip_txbd(bdp, 1, base, ring_size);
 }
@@ -2035,8 +2045,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * before start of transmission.
 	 */
 	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
-			skb->ip_summed == CHECKSUM_PARTIAL &&
-			skb->len > 2500)) {
+		     skb->ip_summed == CHECKSUM_PARTIAL &&
+		     skb->len > 2500)) {
 		int ret;
 
 		ret = skb_checksum_help(skb);
@@ -2052,16 +2062,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* check if time stamp should be generated */
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-			priv->hwts_tx_en)) {
+		     priv->hwts_tx_en)) {
 		do_tstamp = 1;
 		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 	}
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-			vlan_tx_tag_present(skb) ||
-			unlikely(do_tstamp)) &&
-			(skb_headroom(skb) < fcb_length)) {
+	     vlan_tx_tag_present(skb) ||
+	     unlikely(do_tstamp)) &&
+	     (skb_headroom(skb) < fcb_length)) {
 		struct sk_buff *skb_new;
 
 		skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2105,12 +2115,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Time stamp insertion requires one additional TxBD */
 	if (unlikely(do_tstamp))
 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-				tx_queue->tx_ring_size);
+						 tx_queue->tx_ring_size);
 
 	if (nr_frags == 0) {
 		if (unlikely(do_tstamp))
 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
-					TXBD_INTERRUPT);
+							  TXBD_INTERRUPT);
 		else
 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 	} else {
@@ -2122,7 +2132,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			length = skb_shinfo(skb)->frags[i].size;
 
 			lstatus = txbdp->lstatus | length |
-					BD_LFLAG(TXBD_READY);
+				  BD_LFLAG(TXBD_READY);
 
 			/* Handle the last BD specially */
 			if (i == nr_frags - 1)
@@ -2152,8 +2162,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
 		fcb = gfar_add_fcb(skb);
 		/* as specified by errata */
-		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
-			     && ((unsigned long)fcb % 0x20) > 0x18)) {
+		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+			     ((unsigned long)fcb % 0x20) > 0x18)) {
 			__skb_pull(skb, GMAC_FCB_LEN);
 			skb_checksum_help(skb);
 		} else {
@@ -2181,7 +2191,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
-			skb_headlen(skb), DMA_TO_DEVICE);
+					     skb_headlen(skb), DMA_TO_DEVICE);
 
 	/* If time stamping is requested one additional TxBD must be set up. The
 	 * first TxBD points to the FCB and must have a data length of
@@ -2191,7 +2201,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(do_tstamp)) {
 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-				(skb_headlen(skb) - fcb_length);
+					 (skb_headlen(skb) - fcb_length);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2231,7 +2241,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * (wrapping if necessary)
 	 */
 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
-		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
@@ -2365,9 +2375,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 
 	frame_size += priv->padding;
 
-	tempsize =
-	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-	    INCREMENTAL_BUFFER_SIZE;
+	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+		   INCREMENTAL_BUFFER_SIZE;
 
 	/* Only stop and start the controller if it isn't already
 	 * stopped, and we changed something
@@ -2389,7 +2398,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	tempval = gfar_read(&regs->maccfg2);
 
 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-			gfar_has_errata(priv, GFAR_ERRATA_74))
+	    gfar_has_errata(priv, GFAR_ERRATA_74))
 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 	else
 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2410,7 +2419,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 static void gfar_reset_task(struct work_struct *work)
 {
 	struct gfar_private *priv = container_of(work, struct gfar_private,
-			reset_task);
+						 reset_task);
 	struct net_device *dev = priv->ndev;
 
 	if (dev->flags & IFF_UP) {
@@ -2437,7 +2446,7 @@ static void gfar_align_skb(struct sk_buff *skb)
 	 * as many bytes as needed to align the data properly
 	 */
 	skb_reserve(skb, RXBUF_ALIGNMENT -
-		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -2485,7 +2494,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		/* Only clean completed frames */
 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
-				(lstatus & BD_LENGTH_MASK))
+		    (lstatus & BD_LENGTH_MASK))
 			break;
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2495,11 +2504,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			buflen = bdp->length;
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
-				buflen, DMA_TO_DEVICE);
+				 buflen, DMA_TO_DEVICE);
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			struct skb_shared_hwtstamps shhwtstamps;
 			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2512,10 +2522,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		bdp = next_txbd(bdp, base, tx_ring_size);
 
 		for (i = 0; i < frags; i++) {
-			dma_unmap_page(&priv->ofdev->dev,
-					bdp->bufPtr,
-					bdp->length,
-					DMA_TO_DEVICE);
+			dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+				       bdp->length, DMA_TO_DEVICE);
 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
@@ -2526,8 +2534,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		 * we add this skb back into the pool, if it's the right size
 		 */
 		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
-				skb_recycle_check(skb, priv->rx_buffer_size +
-					RXBUF_ALIGNMENT)) {
+		    skb_recycle_check(skb, priv->rx_buffer_size +
+				      RXBUF_ALIGNMENT)) {
 			gfar_align_skb(skb);
 			skb_queue_head(&priv->rx_recycle, skb);
 		} else
@@ -2536,7 +2544,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
-			TX_RING_MOD_MASK(tx_ring_size);
+			      TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
 		spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2583,7 +2591,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
 }
 
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-		struct sk_buff *skb)
+			   struct sk_buff *skb)
 {
 	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
@@ -2700,6 +2708,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 	if (priv->hwts_rx_en) {
 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 		u64 *ns = (u64 *) skb->data;
+
 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
 	}
@@ -2752,6 +2761,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
 		struct sk_buff *newskb;
+
 		rmb();
 
 		/* Add another skb for the future */
@@ -2760,15 +2770,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
-				priv->rx_buffer_size, DMA_FROM_DEVICE);
+				 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
 		if (unlikely(!(bdp->status & RXBD_ERR) &&
-				bdp->length > priv->rx_buffer_size))
+			     bdp->length > priv->rx_buffer_size))
 			bdp->status = RXBD_LARGE;
 
 		/* We drop the frame if we failed to allocate a new buffer */
 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
-				bdp->status & RXBD_ERR)) {
+			     bdp->status & RXBD_ERR)) {
 			count_errors(bdp->status, dev);
 
 			if (unlikely(!newskb))
@@ -2787,7 +2797,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			rx_queue->stats.rx_bytes += pkt_len;
 			skb_record_rx_queue(skb, rx_queue->qindex);
 			gfar_process_frame(dev, skb, amount_pull,
-					&rx_queue->grp->napi);
+					   &rx_queue->grp->napi);
 
 		} else {
 			netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2806,9 +2816,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		rx_queue->skb_currx =
-			(rx_queue->skb_currx + 1) &
-			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
@@ -2819,8 +2828,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_priv_grp *gfargrp = container_of(napi,
-			struct gfar_priv_grp, napi);
+	struct gfar_priv_grp *gfargrp =
+		container_of(napi, struct gfar_priv_grp, napi);
 	struct gfar_private *priv = gfargrp->priv;
 	struct gfar __iomem *regs = gfargrp->regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2839,7 +2848,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
 
 	while (num_queues && left_over_budget) {
-
 		budget_per_queue = left_over_budget/num_queues;
 		left_over_budget = 0;
 
@@ -2850,12 +2858,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 			tx_queue = priv->tx_queue[rx_queue->qindex];
 
 			tx_cleaned += gfar_clean_tx_ring(tx_queue);
-			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
-					budget_per_queue);
+			rx_cleaned_per_queue =
+				gfar_clean_rx_ring(rx_queue, budget_per_queue);
 			rx_cleaned += rx_cleaned_per_queue;
-			if(rx_cleaned_per_queue < budget_per_queue) {
+			if (rx_cleaned_per_queue < budget_per_queue) {
 				left_over_budget = left_over_budget +
-					(budget_per_queue - rx_cleaned_per_queue);
+						   (budget_per_queue -
+						    rx_cleaned_per_queue);
 				set_bit(i, &serviced_queues);
 				num_queues--;
 			}
@@ -2876,8 +2885,8 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 		/* If we are coalescing interrupts, update the timer
 		 * Otherwise, clear it
 		 */
-		gfar_configure_coalescing(priv,
-				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+					  gfargrp->tx_bit_map);
 	}
 
 	return rx_cleaned;
@@ -2900,7 +2909,7 @@ static void gfar_netpoll(struct net_device *dev)
 			disable_irq(priv->gfargrp[i].interruptReceive);
 			disable_irq(priv->gfargrp[i].interruptError);
 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
-						&priv->gfargrp[i]);
+				       &priv->gfargrp[i]);
 			enable_irq(priv->gfargrp[i].interruptError);
 			enable_irq(priv->gfargrp[i].interruptReceive);
 			enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2909,7 +2918,7 @@ static void gfar_netpoll(struct net_device *dev)
 		for (i = 0; i < priv->num_grps; i++) {
 			disable_irq(priv->gfargrp[i].interruptTransmit);
 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
-						&priv->gfargrp[i]);
+				       &priv->gfargrp[i]);
 			enable_irq(priv->gfargrp[i].interruptTransmit);
 		}
 	}
@@ -3125,7 +3134,7 @@ static void gfar_clear_exact_match(struct net_device *dev)
 	int idx;
 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
-	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
 		gfar_set_mac_for_addr(dev, idx, zero_arr);
 }
 
@@ -3208,7 +3217,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
 
 	/* Hmm... */
 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
-		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+		netdev_dbg(dev,
+			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
 			   events, gfar_read(&regs->imask));
 
 	/* Update the error counters */