author		Nicolas Ferre <nicolas.ferre@atmel.com>	2013-06-04 17:57:12 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-06 19:22:45 -0400
commit		4df95131ea803bcb94f472d465c73ed57015c470 (patch)
tree		619988df3ed92f35ec7544642616746234064dbe /drivers/net/ethernet/cadence
parent		1b44791ab4ed27e6fa69f5dfa81b0fd48b1d050d (diff)
net/macb: change RX path for GEM
GEM is able to adapt its DMA buffer size, so change the RX path to take
advantage of this possibility and remove all memcpy operations from this
path. This modification introduces function pointers that manage the
differences between the MACB and GEM adapter types.

Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
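In essence, the patch replaces direct calls such as macb_rx() with indirect
calls through a per-adapter ops table that macb_probe() fills in once, so the
hot paths never re-test the hardware variant. Below is a minimal, standalone
C sketch of that dispatch pattern; the demo_* names and printf bodies are
illustrative stand-ins, not the driver's actual code:

#include <stdio.h>

struct demo_dev;

/* per-variant RX hook, mirroring struct macb_or_gem_ops */
struct demo_ops {
	int (*rx)(struct demo_dev *dev, int budget);
};

struct demo_dev {
	int is_gem;		/* hardware variant flag */
	struct demo_ops ops;	/* filled in once at probe time */
};

/* GEM-style path: hand each DMA buffer to the stack, no memcpy */
static int demo_gem_rx(struct demo_dev *dev, int budget)
{
	printf("gem rx, budget %d\n", budget);
	return 0;
}

/* MACB-style path: copy frames out of small fixed DMA buffers */
static int demo_macb_rx(struct demo_dev *dev, int budget)
{
	printf("macb rx, budget %d\n", budget);
	return 0;
}

/* probe-time selection, like the macb_is_gem() test in macb_probe() */
static void demo_probe(struct demo_dev *dev)
{
	dev->ops.rx = dev->is_gem ? demo_gem_rx : demo_macb_rx;
}

int main(void)
{
	struct demo_dev dev = { .is_gem = 1 };

	demo_probe(&dev);
	dev.ops.rx(&dev, 64);	/* the NAPI poll loop calls through the pointer */
	return 0;
}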
Diffstat (limited to 'drivers/net/ethernet/cadence')
-rw-r--r--	drivers/net/ethernet/cadence/macb.c	308
-rw-r--r--	drivers/net/ethernet/cadence/macb.h	13
2 files changed, 272 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index aab7bb22b1d0..f7e21f278e05 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -33,7 +33,6 @@
 #include "macb.h"
 
 #define MACB_RX_BUFFER_SIZE	128
-#define GEM_RX_BUFFER_SIZE	2048
 #define RX_BUFFER_MULTIPLE	64	/* bytes */
 #define RX_RING_SIZE		512	/* must be power of 2 */
 #define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
@@ -530,6 +529,155 @@ static void macb_tx_interrupt(struct macb *bp)
 		netif_wake_queue(bp->dev);
 }
 
+static void gem_rx_refill(struct macb *bp)
+{
+	unsigned int		entry;
+	struct sk_buff		*skb;
+	struct macb_dma_desc	*desc;
+	dma_addr_t		paddr;
+
+	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+		u32 addr, ctrl;
+
+		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+		desc = &bp->rx_ring[entry];
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+		bp->rx_prepared_head++;
+
+		if ((addr & MACB_BIT(RX_USED)))
+			continue;
+
+		if (bp->rx_skbuff[entry] == NULL) {
+			/* allocate sk_buff for this free entry in ring */
+			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+			if (unlikely(skb == NULL)) {
+				netdev_err(bp->dev,
+					   "Unable to allocate sk_buff\n");
+				break;
+			}
+			bp->rx_skbuff[entry] = skb;
+
+			/* now fill corresponding descriptor entry */
+			paddr = dma_map_single(&bp->pdev->dev, skb->data,
+					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+			if (entry == RX_RING_SIZE - 1)
+				paddr |= MACB_BIT(RX_WRAP);
+			bp->rx_ring[entry].addr = paddr;
+			bp->rx_ring[entry].ctrl = 0;
+
+			/* properly align Ethernet header */
+			skb_reserve(skb, NET_IP_ALIGN);
+		}
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+		    bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+				  unsigned int end)
+{
+	unsigned int frag;
+
+	for (frag = begin; frag != end; frag++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/*
+	 * When this happens, the hardware stats registers for
+	 * whatever caused this is updated, so we don't have to record
+	 * anything.
+	 */
+}
+
+static int gem_rx(struct macb *bp, int budget)
+{
+	unsigned int		len;
+	unsigned int		entry;
+	struct sk_buff		*skb;
+	struct macb_dma_desc	*desc;
+	int			count = 0;
+
+	while (count < budget) {
+		u32 addr, ctrl;
+
+		entry = macb_rx_ring_wrap(bp->rx_tail);
+		desc = &bp->rx_ring[entry];
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		desc->addr &= ~MACB_BIT(RX_USED);
+		bp->rx_tail++;
+		count++;
+
+		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+			netdev_err(bp->dev,
+				   "not whole frame pointed by descriptor\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		skb = bp->rx_skbuff[entry];
+		if (unlikely(!skb)) {
+			netdev_err(bp->dev,
+				   "inconsistent Rx descriptor chain\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		/* now everything is ready for receiving packet */
+		bp->rx_skbuff[entry] = NULL;
+		len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+		skb_put(skb, len);
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+		dma_unmap_single(&bp->pdev->dev, addr,
+				 len, DMA_FROM_DEVICE);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		skb_checksum_none_assert(skb);
+
+		bp->stats.rx_packets++;
+		bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+			    skb->len, skb->csum);
+		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->mac_header, 16, true);
+		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->data, 32, true);
+#endif
+
+		netif_receive_skb(skb);
+	}
+
+	gem_rx_refill(bp);
+
+	return count;
+}
+
 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 			 unsigned int last_frag)
 {
@@ -608,27 +756,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	return 0;
 }
 
-/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
-				  unsigned int end)
-{
-	unsigned int frag;
-
-	for (frag = begin; frag != end; frag++) {
-		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
-		desc->addr &= ~MACB_BIT(RX_USED);
-	}
-
-	/* Make descriptor updates visible to hardware */
-	wmb();
-
-	/*
-	 * When this happens, the hardware stats registers for
-	 * whatever caused this is updated, so we don't have to record
-	 * anything.
-	 */
-}
-
 static int macb_rx(struct macb *bp, int budget)
 {
 	int received = 0;
@@ -689,7 +816,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		   (unsigned long)status, budget);
 
-	work_done = macb_rx(bp, budget);
+	work_done = bp->macbgem_ops.mog_rx(bp, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
 
@@ -872,29 +999,63 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static void macb_init_rx_buffer_size(struct macb *bp)
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
 {
 	if (!macb_is_gem(bp)) {
 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
 	} else {
-		bp->rx_buffer_size = GEM_RX_BUFFER_SIZE;
+		bp->rx_buffer_size = size;
 
-		if (bp->rx_buffer_size > PAGE_SIZE) {
-			netdev_warn(bp->dev,
-				    "RX buffer cannot be bigger than PAGE_SIZE, shrinking\n");
-			bp->rx_buffer_size = PAGE_SIZE;
-		}
 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
-			netdev_warn(bp->dev,
-				    "RX buffer must be multiple of %d bytes, shrinking\n",
+			netdev_dbg(bp->dev,
+				   "RX buffer must be multiple of %d bytes, expanding\n",
 				    RX_BUFFER_MULTIPLE);
 			bp->rx_buffer_size =
-				rounddown(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
 		}
-		bp->rx_buffer_size = max(RX_BUFFER_MULTIPLE, GEM_RX_BUFFER_SIZE);
 	}
+
+	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
+		   bp->dev->mtu, bp->rx_buffer_size);
 }
 
+static void gem_free_rx_buffers(struct macb *bp)
+{
+	struct sk_buff		*skb;
+	struct macb_dma_desc	*desc;
+	dma_addr_t		addr;
+	int			i;
+
+	if (!bp->rx_skbuff)
+		return;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = bp->rx_skbuff[i];
+
+		if (skb == NULL)
+			continue;
+
+		desc = &bp->rx_ring[i];
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+		dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
+
+	kfree(bp->rx_skbuff);
+	bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+	if (bp->rx_buffers) {
+		dma_free_coherent(&bp->pdev->dev,
+				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_buffers, bp->rx_buffers_dma);
+		bp->rx_buffers = NULL;
+	}
+}
 
 static void macb_free_consistent(struct macb *bp)
 {
@@ -902,6 +1063,7 @@ static void macb_free_consistent(struct macb *bp)
 		kfree(bp->tx_skb);
 		bp->tx_skb = NULL;
 	}
+	bp->macbgem_ops.mog_free_rx_buffers(bp);
 	if (bp->rx_ring) {
 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
 				  bp->rx_ring, bp->rx_ring_dma);
@@ -912,12 +1074,37 @@ static void macb_free_consistent(struct macb *bp)
 				  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 	}
-	if (bp->rx_buffers) {
-		dma_free_coherent(&bp->pdev->dev,
-				  RX_RING_SIZE * bp->rx_buffer_size,
-				  bp->rx_buffers, bp->rx_buffers_dma);
-		bp->rx_buffers = NULL;
-	}
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * sizeof(struct sk_buff *);
+	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+	if (!bp->rx_skbuff)
+		return -ENOMEM;
+	else
+		netdev_dbg(bp->dev,
+			   "Allocated %d RX struct sk_buff entries at %p\n",
+			   RX_RING_SIZE, bp->rx_skbuff);
+	return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * bp->rx_buffer_size;
+	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &bp->rx_buffers_dma, GFP_KERNEL);
+	if (!bp->rx_buffers)
+		return -ENOMEM;
+	else
+		netdev_dbg(bp->dev,
+			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+	return 0;
 }
 
 static int macb_alloc_consistent(struct macb *bp)
@@ -947,14 +1134,8 @@ static int macb_alloc_consistent(struct macb *bp)
 		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
 		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
 
-	size = RX_RING_SIZE * bp->rx_buffer_size;
-	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
-					    &bp->rx_buffers_dma, GFP_KERNEL);
-	if (!bp->rx_buffers)
+	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;
-	netdev_dbg(bp->dev,
-		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 
 	return 0;
 
@@ -963,6 +1144,21 @@ out_err:
 	return -ENOMEM;
 }
 
+static void gem_init_rings(struct macb *bp)
+{
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+	gem_rx_refill(bp);
+}
+
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
@@ -1259,6 +1455,7 @@ EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 static int macb_open(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
+	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
 	int err;
 
 	netdev_dbg(bp->dev, "open\n");
@@ -1271,7 +1468,7 @@ static int macb_open(struct net_device *dev)
 		return -EAGAIN;
 
 	/* RX buffers initialization */
-	macb_init_rx_buffer_size(bp);
+	macb_init_rx_buffer_size(bp, bufsz);
 
 	err = macb_alloc_consistent(bp);
 	if (err) {
@@ -1282,7 +1479,7 @@ static int macb_open(struct net_device *dev)
 
 	napi_enable(&bp->napi);
 
-	macb_init_rings(bp);
+	bp->macbgem_ops.mog_init_rings(bp);
 	macb_init_hw(bp);
 
 	/* schedule a link state check */
@@ -1601,6 +1798,19 @@ static int __init macb_probe(struct platform_device *pdev)
 
 	dev->base_addr = regs->start;
 
+	/* setup appropriated routines according to adapter type */
+	if (macb_is_gem(bp)) {
+		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = gem_init_rings;
+		bp->macbgem_ops.mog_rx = gem_rx;
+	} else {
+		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = macb_init_rings;
+		bp->macbgem_ops.mog_rx = macb_rx;
+	}
+
 	/* Set MII management clock divider */
 	config = macb_mdc_clk_div(bp);
 	config |= macb_dbw(bp);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9b5e19f8b61d..f4076155bed7 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -545,11 +545,22 @@ struct gem_stats {
 	u32	rx_udp_checksum_errors;
 };
 
+struct macb;
+
+struct macb_or_gem_ops {
+	int	(*mog_alloc_rx_buffers)(struct macb *bp);
+	void	(*mog_free_rx_buffers)(struct macb *bp);
+	void	(*mog_init_rings)(struct macb *bp);
+	int	(*mog_rx)(struct macb *bp, int budget);
+};
+
 struct macb {
 	void __iomem		*regs;
 
 	unsigned int		rx_tail;
+	unsigned int		rx_prepared_head;
 	struct macb_dma_desc	*rx_ring;
+	struct sk_buff		**rx_skbuff;
 	void			*rx_buffers;
 	size_t			rx_buffer_size;
 
@@ -574,6 +585,8 @@ struct macb {
 	dma_addr_t		tx_ring_dma;
 	dma_addr_t		rx_buffers_dma;
 
+	struct macb_or_gem_ops	macbgem_ops;
+
 	struct mii_bus		*mii_bus;
 	struct phy_device	*phy_dev;
 	unsigned int		link;