aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 23:01:30 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 23:01:30 -0500
commitc5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree9830baf38832769e1cf621708889111bbe3c93df /drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
parent29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) More iov_iter conversion work from Al Viro. [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was wrong, and this pull actually adds an extra commit on top of the branch I'm pulling to fix that up, so that the pre-merge state is ok. - Linus ] 2) Various optimizations to the ipv4 forwarding information base trie lookup implementation. From Alexander Duyck. 3) Remove sock_iocb altogether, from Christoph Hellwig. 4) Allow congestion control algorithm selection via routing metrics. From Daniel Borkmann. 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet. 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet. 7) Add xmit_more support to r8169, e1000, and e1000e drivers. From Florian Westphal. 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross. 9) Add BPF packet actions to packet scheduler, from Jiri Pirko. 10) Add support for unique flow IDs to openvswitch, from Joe Stringer. 11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman Kwok. 12) More sanely handle out-of-window dupacks, which can result in serious ACK storms. From Neal Cardwell. 13) Various rhashtable bug fixes and enhancements, from Herbert Xu, Patrick McHardy, and Thomas Graf. 14) Support xmit_more in be2net, from Sathya Perla. 15) Group Policy extensions for vxlan, from Thomas Graf. 16) Remove Checksum Offload support for vxlan, from Tom Herbert. 17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From Vlad Yasevich. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits) crypto: fix af_alg_make_sg() conversion to iov_iter ipv4: Namespecify TCP PMTU mechanism i40e: Fix for stats init function call in Rx setup tcp: don't include Fast Open option in SYN-ACK on pure SYN-data openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set ipv6: Make __ipv6_select_ident static ipv6: Fix fragment id assignment on LE arches. 
bridge: Fix inability to add non-vlan fdb entry net: Mellanox: Delete unnecessary checks before the function call "vunmap" cxgb4: Add support in cxgb4 to get expansion rom version via ethtool ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version net: dsa: Remove redundant phy_attach() IB/mlx4: Reset flow support for IB kernel ULPs IB/mlx4: Always use the correct port for mirrored multicast attachments net/bonding: Fix potential bad memory access during bonding events tipc: remove tipc_snprintf tipc: nl compat add noop and remove legacy nl framework tipc: convert legacy nl stats show to nl compat tipc: convert legacy nl net id get to nl compat tipc: convert legacy nl net id set to nl compat ...
Diffstat (limited to 'drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c')
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c95
1 files changed, 65 insertions, 30 deletions
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9e2bcb807923..a17628769a1f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
278 fep->stats.collisions++; 278 fep->stats.collisions++;
279 279
280 /* unmap */ 280 /* unmap */
281 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 281 if (fep->mapped_as_page[dirtyidx])
282 skb->len, DMA_TO_DEVICE); 282 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
283 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
284 else
285 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
286 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
283 287
284 /* 288 /*
285 * Free the sk buffer associated with this last transmit. 289 * Free the sk buffer associated with this last transmit.
286 */ 290 */
287 dev_kfree_skb(skb); 291 if (skb) {
288 fep->tx_skbuff[dirtyidx] = NULL; 292 dev_kfree_skb(skb);
293 fep->tx_skbuff[dirtyidx] = NULL;
294 }
289 295
290 /* 296 /*
291 * Update pointer to next buffer descriptor to be transmitted. 297 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
299 * Since we have freed up a buffer, the ring is no longer 305 * Since we have freed up a buffer, the ring is no longer
300 * full. 306 * full.
301 */ 307 */
302 if (!fep->tx_free++) 308 if (++fep->tx_free >= MAX_SKB_FRAGS)
303 do_wake = 1; 309 do_wake = 1;
304 has_tx_work = 1; 310 has_tx_work = 1;
305 } 311 }
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
509 cbd_t __iomem *bdp; 515 cbd_t __iomem *bdp;
510 int curidx; 516 int curidx;
511 u16 sc; 517 u16 sc;
518 int nr_frags = skb_shinfo(skb)->nr_frags;
519 skb_frag_t *frag;
520 int len;
512 521
513#ifdef CONFIG_FS_ENET_MPC5121_FEC 522#ifdef CONFIG_FS_ENET_MPC5121_FEC
514 if (((unsigned long)skb->data) & 0x3) { 523 if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
530 */ 539 */
531 bdp = fep->cur_tx; 540 bdp = fep->cur_tx;
532 541
533 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { 542 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
534 netif_stop_queue(dev); 543 netif_stop_queue(dev);
535 spin_unlock(&fep->tx_lock); 544 spin_unlock(&fep->tx_lock);
536 545
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 } 552 }
544 553
545 curidx = bdp - fep->tx_bd_base; 554 curidx = bdp - fep->tx_bd_base;
546 /*
547 * Clear all of the status flags.
548 */
549 CBDC_SC(bdp, BD_ENET_TX_STATS);
550
551 /*
552 * Save skb pointer.
553 */
554 fep->tx_skbuff[curidx] = skb;
555
556 fep->stats.tx_bytes += skb->len;
557 555
556 len = skb->len;
557 fep->stats.tx_bytes += len;
558 if (nr_frags)
559 len -= skb->data_len;
560 fep->tx_free -= nr_frags + 1;
558 /* 561 /*
559 * Push the data cache so the CPM does not get stale memory data. 562 * Push the data cache so the CPM does not get stale memory data.
560 */ 563 */
561 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, 564 CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
562 skb->data, skb->len, DMA_TO_DEVICE)); 565 skb->data, len, DMA_TO_DEVICE));
563 CBDW_DATLEN(bdp, skb->len); 566 CBDW_DATLEN(bdp, len);
567
568 fep->mapped_as_page[curidx] = 0;
569 frag = skb_shinfo(skb)->frags;
570 while (nr_frags) {
571 CBDC_SC(bdp,
572 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
573 CBDS_SC(bdp, BD_ENET_TX_READY);
574
575 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
576 bdp++, curidx++;
577 else
578 bdp = fep->tx_bd_base, curidx = 0;
564 579
565 /* 580 len = skb_frag_size(frag);
566 * If this was the last BD in the ring, start at the beginning again. 581 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
567 */ 582 DMA_TO_DEVICE));
568 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 583 CBDW_DATLEN(bdp, len);
569 fep->cur_tx++;
570 else
571 fep->cur_tx = fep->tx_bd_base;
572 584
573 if (!--fep->tx_free) 585 fep->tx_skbuff[curidx] = NULL;
574 netif_stop_queue(dev); 586 fep->mapped_as_page[curidx] = 1;
587
588 frag++;
589 nr_frags--;
590 }
575 591
576 /* Trigger transmission start */ 592 /* Trigger transmission start */
577 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | 593 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
582 * yay for hw reuse :) */ 598 * yay for hw reuse :) */
583 if (skb->len <= 60) 599 if (skb->len <= 60)
584 sc |= BD_ENET_TX_PAD; 600 sc |= BD_ENET_TX_PAD;
601 CBDC_SC(bdp, BD_ENET_TX_STATS);
585 CBDS_SC(bdp, sc); 602 CBDS_SC(bdp, sc);
586 603
604 /* Save skb pointer. */
605 fep->tx_skbuff[curidx] = skb;
606
607 /* If this was the last BD in the ring, start at the beginning again. */
608 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
609 bdp++;
610 else
611 bdp = fep->tx_bd_base;
612 fep->cur_tx = bdp;
613
614 if (fep->tx_free < MAX_SKB_FRAGS)
615 netif_stop_queue(dev);
616
587 skb_tx_timestamp(skb); 617 skb_tx_timestamp(skb);
588 618
589 (*fep->ops->tx_kickstart)(dev); 619 (*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
917 } 947 }
918 948
919 fpi->rx_ring = 32; 949 fpi->rx_ring = 32;
920 fpi->tx_ring = 32; 950 fpi->tx_ring = 64;
921 fpi->rx_copybreak = 240; 951 fpi->rx_copybreak = 240;
922 fpi->napi_weight = 17; 952 fpi->napi_weight = 17;
923 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 953 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
955 985
956 privsize = sizeof(*fep) + 986 privsize = sizeof(*fep) +
957 sizeof(struct sk_buff **) * 987 sizeof(struct sk_buff **) *
958 (fpi->rx_ring + fpi->tx_ring); 988 (fpi->rx_ring + fpi->tx_ring) +
989 sizeof(char) * fpi->tx_ring;
959 990
960 ndev = alloc_etherdev(privsize); 991 ndev = alloc_etherdev(privsize);
961 if (!ndev) { 992 if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
978 1009
979 fep->rx_skbuff = (struct sk_buff **)&fep[1]; 1010 fep->rx_skbuff = (struct sk_buff **)&fep[1];
980 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; 1011 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1012 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
1013 fpi->tx_ring);
981 1014
982 spin_lock_init(&fep->lock); 1015 spin_lock_init(&fep->lock);
983 spin_lock_init(&fep->tx_lock); 1016 spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
1007 1040
1008 netif_carrier_off(ndev); 1041 netif_carrier_off(ndev);
1009 1042
1043 ndev->features |= NETIF_F_SG;
1044
1010 ret = register_netdev(ndev); 1045 ret = register_netdev(ndev);
1011 if (ret) 1046 if (ret)
1012 goto out_free_bd; 1047 goto out_free_bd;