author		David S. Miller <davem@davemloft.net>	2014-07-25 02:40:00 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-25 02:40:00 -0400
commit		1e3550e46141b4734c237de69612175c13dc38cc (patch)
tree		d5a3e395bcbd98d567e29a484ecd29773fdea5ea
parent		ef492001214ba250b3bd18a7a346a047fe079449 (diff)
parent		4b7b0e4f25612cc204e550018ee8c087a2062a6b (diff)
Merge branch 'macb-next'
Cyrille Pitchen says:

====================
net/macb: add HW features to macb driver

This series of patches adds new hardware features to the macb driver.
These features can be enabled/disabled at runtime using ethtool.
Depending on the hardware and design configuration, some are enabled by
default whereas others are disabled. For instance, the checksum offload
features are enabled by default on GEM designs with a packet buffer, but
disabled on FIFO mode designs and on the older MACB.

Besides, the scatter-gather feature is enabled and tested on macb but
disabled on the sama5d3x gem: when testing this feature on the sama5d3x
gem, TX lockups occurred frequently.

Also, the RX checksum offload feature is enabled at the GEM level only
when both the IFF_PROMISC bit is clear in dev->flags and the
NETIF_F_RXCSUM bit is set in dev->features.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
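[Editor's note: the runtime toggles mentioned above are the standard ethtool
offload switches. A minimal usage sketch, assuming a hypothetical interface
name eth0 (not part of the patch):

	ethtool -k eth0         # list offload states (rx/tx checksumming, scatter-gather)
	ethtool -K eth0 rx on   # request RX checksum offload (NETIF_F_RXCSUM)
	ethtool -K eth0 tx off  # drop TX checksum offload (NETIF_F_HW_CSUM)
	ethtool -K eth0 sg off  # disable scatter-gather (NETIF_F_SG)
]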
 arch/arm/boot/dts/sama5d3_gmac.dtsi |   2
 drivers/net/ethernet/cadence/macb.c | 419
 drivers/net/ethernet/cadence/macb.h |  53
 3 files changed, 399 insertions(+), 75 deletions(-)
diff --git a/arch/arm/boot/dts/sama5d3_gmac.dtsi b/arch/arm/boot/dts/sama5d3_gmac.dtsi
index a6cb0508762f..de5ed59fb446 100644
--- a/arch/arm/boot/dts/sama5d3_gmac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_gmac.dtsi
@@ -74,7 +74,7 @@
 	};
 
 	macb0: ethernet@f0028000 {
-		compatible = "cdns,pc302-gem", "cdns,gem";
+		compatible = "atmel,sama5d3-gem";
 		reg = <0xf0028000 0x100>;
 		interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
 		pinctrl-names = "default";
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e9daa072ebb4..ca5d7798b265 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -52,6 +52,9 @@
 		 | MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -264,7 +267,8 @@ static void macb_handle_link_change(struct net_device *dev)
 				reg |= MACB_BIT(FD);
 			if (phydev->speed == SPEED_100)
 				reg |= MACB_BIT(SPD);
-			if (phydev->speed == SPEED_1000)
+			if (phydev->speed == SPEED_1000 &&
+			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 				reg |= GEM_BIT(GBE);
 
 			macb_or_gem_writel(bp, NCFGR, reg);
@@ -337,7 +341,7 @@ static int macb_mii_probe(struct net_device *dev)
 	}
 
 	/* mask with MAC supported features */
-	if (macb_is_gem(bp))
+	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 		phydev->supported &= PHY_GBIT_FEATURES;
 	else
 		phydev->supported &= PHY_BASIC_FEATURES;
@@ -467,6 +471,24 @@ static int macb_halt_tx(struct macb *bp)
 	return -ETIMEDOUT;
 }
 
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+{
+	if (tx_skb->mapping) {
+		if (tx_skb->mapped_as_page)
+			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
+				       tx_skb->size, DMA_TO_DEVICE);
+		else
+			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
+					 tx_skb->size, DMA_TO_DEVICE);
+		tx_skb->mapping = 0;
+	}
+
+	if (tx_skb->skb) {
+		dev_kfree_skb_any(tx_skb->skb);
+		tx_skb->skb = NULL;
+	}
+}
+
 static void macb_tx_error_task(struct work_struct *work)
 {
 	struct macb *bp = container_of(work, struct macb, tx_error_task);
@@ -504,10 +526,23 @@ static void macb_tx_error_task(struct work_struct *work)
 		skb = tx_skb->skb;
 
 		if (ctrl & MACB_BIT(TX_USED)) {
-			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-				    macb_tx_ring_wrap(tail), skb->data);
-			bp->stats.tx_packets++;
-			bp->stats.tx_bytes += skb->len;
+			/* skb is set for the last buffer of the frame */
+			while (!skb) {
+				macb_tx_unmap(bp, tx_skb);
+				tail++;
+				tx_skb = macb_tx_skb(bp, tail);
+				skb = tx_skb->skb;
+			}
+
+			/* ctrl still refers to the first buffer descriptor
+			 * since it's the only one written back by the hardware
+			 */
+			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
+				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
 		} else {
 			/*
 			 * "Buffers exhausted mid-frame" errors may only happen
@@ -521,10 +556,7 @@ static void macb_tx_error_task(struct work_struct *work)
 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
 		}
 
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		tx_skb->skb = NULL;
-		dev_kfree_skb(skb);
+		macb_tx_unmap(bp, tx_skb);
 	}
 
 	/* Make descriptor updates visible to hardware */
@@ -572,20 +604,35 @@ static void macb_tx_interrupt(struct macb *bp)
 
 		ctrl = desc->ctrl;
 
+		/* TX_USED bit is only set by hardware on the very first buffer
+		 * descriptor of the transmitted frame.
+		 */
 		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
-		tx_skb = macb_tx_skb(bp, tail);
-		skb = tx_skb->skb;
+		/* Process all buffers of the current transmitted frame */
+		for (;; tail++) {
+			tx_skb = macb_tx_skb(bp, tail);
+			skb = tx_skb->skb;
+
+			/* First, update TX stats if needed */
+			if (skb) {
+				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
 
-		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-			    macb_tx_ring_wrap(tail), skb->data);
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		bp->stats.tx_packets++;
-		bp->stats.tx_bytes += skb->len;
-		tx_skb->skb = NULL;
-		dev_kfree_skb_irq(skb);
+			/* Now we can safely release resources */
+			macb_tx_unmap(bp, tx_skb);
+
+			/* skb is set only for the last buffer of the frame.
+			 * WARNING: at this point skb has been freed by
+			 * macb_tx_unmap().
+			 */
+			if (skb)
+				break;
+		}
 	}
 
 	bp->tx_tail = tail;
@@ -718,6 +765,10 @@ static int gem_rx(struct macb *bp, int budget)
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb_checksum_none_assert(skb);
+		if (bp->dev->features & NETIF_F_RXCSUM &&
+		    !(bp->dev->flags & IFF_PROMISC) &&
+		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		bp->stats.rx_packets++;
 		bp->stats.rx_bytes += skb->len;
@@ -1001,15 +1052,145 @@ static void macb_poll_controller(struct net_device *dev)
 }
 #endif
 
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
+						     unsigned int len)
+{
+	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
+}
+
+static unsigned int macb_tx_map(struct macb *bp,
+				struct sk_buff *skb)
 {
-	struct macb *bp = netdev_priv(dev);
 	dma_addr_t mapping;
-	unsigned int len, entry;
+	unsigned int len, entry, i, tx_head = bp->tx_head;
+	struct macb_tx_skb *tx_skb = NULL;
 	struct macb_dma_desc *desc;
-	struct macb_tx_skb *tx_skb;
+	unsigned int offset, size, count = 0;
+	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int eof = 1;
 	u32 ctrl;
+
+	/* First, map non-paged data */
+	len = skb_headlen(skb);
+	offset = 0;
+	while (len) {
+		size = min(len, bp->max_tx_length);
+		entry = macb_tx_ring_wrap(tx_head);
+		tx_skb = &bp->tx_skb[entry];
+
+		mapping = dma_map_single(&bp->pdev->dev,
+					 skb->data + offset,
+					 size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
+			goto dma_error;
+
+		/* Save info to properly release resources */
+		tx_skb->skb = NULL;
+		tx_skb->mapping = mapping;
+		tx_skb->size = size;
+		tx_skb->mapped_as_page = false;
+
+		len -= size;
+		offset += size;
+		count++;
+		tx_head++;
+	}
+
+	/* Then, map paged data from fragments */
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+		while (len) {
+			size = min(len, bp->max_tx_length);
+			entry = macb_tx_ring_wrap(tx_head);
+			tx_skb = &bp->tx_skb[entry];
+
+			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
+						   offset, size, DMA_TO_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, mapping))
+				goto dma_error;
+
+			/* Save info to properly release resources */
+			tx_skb->skb = NULL;
+			tx_skb->mapping = mapping;
+			tx_skb->size = size;
+			tx_skb->mapped_as_page = true;
+
+			len -= size;
+			offset += size;
+			count++;
+			tx_head++;
+		}
+	}
+
+	/* Should never happen */
+	if (unlikely(tx_skb == NULL)) {
+		netdev_err(bp->dev, "BUG! empty skb!\n");
+		return 0;
+	}
+
+	/* This is the last buffer of the frame: save socket buffer */
+	tx_skb->skb = skb;
+
+	/* Update TX ring: update buffer descriptors in reverse order
+	 * to avoid race condition
+	 */
+
+	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
+	 * to set the end of TX queue
+	 */
+	i = tx_head;
+	entry = macb_tx_ring_wrap(i);
+	ctrl = MACB_BIT(TX_USED);
+	desc = &bp->tx_ring[entry];
+	desc->ctrl = ctrl;
+
+	do {
+		i--;
+		entry = macb_tx_ring_wrap(i);
+		tx_skb = &bp->tx_skb[entry];
+		desc = &bp->tx_ring[entry];
+
+		ctrl = (u32)tx_skb->size;
+		if (eof) {
+			ctrl |= MACB_BIT(TX_LAST);
+			eof = 0;
+		}
+		if (unlikely(entry == (TX_RING_SIZE - 1)))
+			ctrl |= MACB_BIT(TX_WRAP);
+
+		/* Set TX buffer descriptor */
+		desc->addr = tx_skb->mapping;
+		/* desc->addr must be visible to hardware before clearing
+		 * 'TX_USED' bit in desc->ctrl.
+		 */
+		wmb();
+		desc->ctrl = ctrl;
+	} while (i != bp->tx_head);
+
+	bp->tx_head = tx_head;
+
+	return count;
+
+dma_error:
+	netdev_err(bp->dev, "TX DMA map failed\n");
+
+	for (i = bp->tx_head; i != tx_head; i++) {
+		tx_skb = macb_tx_skb(bp, i);
+
+		macb_tx_unmap(bp, tx_skb);
+	}
+
+	return 0;
+}
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
 	unsigned long flags;
+	unsigned int count, nr_frags, frag_size, f;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
@@ -1020,44 +1201,34 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       skb->data, 16, true);
 #endif
 
-	len = skb->len;
+	/* Count how many TX buffer descriptors are needed to send this
+	 * socket buffer: skb fragments of jumbo frames may need to be
+	 * split into many buffer descriptors.
+	 */
+	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++) {
+		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+		count += macb_count_tx_descriptors(bp, frag_size);
+	}
+
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
-		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
 			   bp->tx_head, bp->tx_tail);
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = macb_tx_ring_wrap(bp->tx_head);
-	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+	/* Map socket buffer for DMA transfer */
+	if (!macb_tx_map(bp, skb)) {
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
 
-	bp->tx_head++;
-	tx_skb = &bp->tx_skb[entry];
-	tx_skb->skb = skb;
-	tx_skb->mapping = mapping;
-	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
-		    skb->data, (unsigned long)mapping);
-
-	ctrl = MACB_BF(TX_FRMLEN, len);
-	ctrl |= MACB_BIT(TX_LAST);
-	if (entry == (TX_RING_SIZE - 1))
-		ctrl |= MACB_BIT(TX_WRAP);
-
-	desc = &bp->tx_ring[entry];
-	desc->addr = mapping;
-	desc->ctrl = ctrl;
-
 	/* Make newly initialized descriptor visible to hardware */
 	wmb();
 
@@ -1342,7 +1513,7 @@ static u32 macb_dbw(struct macb *bp)
 /*
  * Configure the receive DMA engine
  * - use the correct receive buffer size
- * - set the possibility to use INCR16 bursts
+ * - set best burst length for DMA operations
  *   (if not supported by FIFO, it will fallback to default)
  * - set both rx/tx packet buffers to full memory size
  * These are configurable parameters for GEM.
@@ -1354,24 +1525,20 @@ static void macb_configure_dma(struct macb *bp)
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
-		dmacfg |= GEM_BF(FBLDO, 16);
+		if (bp->dma_burst_length)
+			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		dmacfg &= ~GEM_BIT(ENDIA);
+		if (bp->dev->features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
+			   dmacfg);
 		gem_writel(bp, DMACFG, dmacfg);
 	}
 }
 
-/*
- * Configure peripheral capacities according to integration options used
- */
-static void macb_configure_caps(struct macb *bp)
-{
-	if (macb_is_gem(bp)) {
-		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
-			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
-	}
-}
-
 static void macb_init_hw(struct macb *bp)
 {
 	u32 config;
@@ -1386,6 +1553,8 @@ static void macb_init_hw(struct macb *bp)
 	config |= MACB_BIT(BIG);	/* Receive oversized frames */
 	if (bp->dev->flags & IFF_PROMISC)
 		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
+		config |= GEM_BIT(RXCOEN);
 	if (!(bp->dev->flags & IFF_BROADCAST))
 		config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
@@ -1394,7 +1563,6 @@ static void macb_init_hw(struct macb *bp)
 	bp->duplex = DUPLEX_HALF;
 
 	macb_configure_dma(bp);
-	macb_configure_caps(bp);
 
 	/* Initialize TX and RX buffers */
 	macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -1500,13 +1668,22 @@ void macb_set_rx_mode(struct net_device *dev)
 
 	cfg = macb_readl(bp, NCFGR);
 
-	if (dev->flags & IFF_PROMISC)
+	if (dev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
 		cfg |= MACB_BIT(CAF);
-	else if (dev->flags & (~IFF_PROMISC))
-		/* Disable promiscuous mode */
+
+		/* Disable RX checksum offload */
+		if (macb_is_gem(bp))
+			cfg &= ~GEM_BIT(RXCOEN);
+	} else {
+		/* Disable promiscuous mode */
 		cfg &= ~MACB_BIT(CAF);
 
+		/* Enable RX checksum offload only if requested */
+		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
+			cfg |= GEM_BIT(RXCOEN);
+	}
+
 	if (dev->flags & IFF_ALLMULTI) {
 		/* Enable all multicast mode */
 		macb_or_gem_writel(bp, HRB, -1);
@@ -1767,6 +1944,40 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 EXPORT_SYMBOL_GPL(macb_ioctl);
 
+static int macb_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	struct macb *bp = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	/* TX checksum offload */
+	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
+		u32 dmacfg;
+
+		dmacfg = gem_readl(bp, DMACFG);
+		if (features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		gem_writel(bp, DMACFG, dmacfg);
+	}
+
+	/* RX checksum offload */
+	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
+		u32 netcfg;
+
+		netcfg = gem_readl(bp, NCFGR);
+		if (features & NETIF_F_RXCSUM &&
+		    !(netdev->flags & IFF_PROMISC))
+			netcfg |= GEM_BIT(RXCOEN);
+		else
+			netcfg &= ~GEM_BIT(RXCOEN);
+		gem_writel(bp, NCFGR, netcfg);
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops macb_netdev_ops = {
 	.ndo_open		= macb_open,
 	.ndo_stop		= macb_close,
@@ -1780,20 +1991,77 @@ static const struct net_device_ops macb_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= macb_poll_controller,
 #endif
+	.ndo_set_features	= macb_set_features,
 };
 
 #if defined(CONFIG_OF)
+static struct macb_config pc302gem_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d3_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d4_config = {
+	.caps = 0,
+	.dma_burst_length = 4,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,at32ap7000-macb" },
 	{ .compatible = "cdns,at91sam9260-macb" },
 	{ .compatible = "cdns,macb" },
-	{ .compatible = "cdns,pc302-gem" },
-	{ .compatible = "cdns,gem" },
+	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+	{ .compatible = "cdns,gem", .data = &pc302gem_config },
+	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 #endif
 
+/*
+ * Configure peripheral capabilities according to device tree
+ * and integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+	u32 dcfg;
+	const struct of_device_id *match;
+	const struct macb_config *config;
+
+	if (bp->pdev->dev.of_node) {
+		match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
+		if (match && match->data) {
+			config = (const struct macb_config *)match->data;
+
+			bp->caps = config->caps;
+			/*
+			 * As we have access to the matching node, configure
+			 * DMA burst length as well
+			 */
+			bp->dma_burst_length = config->dma_burst_length;
+		}
+	}
+
+	if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+		bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+	if (macb_is_gem(bp)) {
+		dcfg = gem_readl(bp, DCFG1);
+		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
+			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+		dcfg = gem_readl(bp, DCFG2);
+		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
+			bp->caps |= MACB_CAPS_FIFO_MODE;
+	}
+
+	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+}
+
 static int __init macb_probe(struct platform_device *pdev)
 {
 	struct macb_platform_data *pdata;
@@ -1828,9 +2096,6 @@ static int __init macb_probe(struct platform_device *pdev)
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	/* TODO: Actually, we have some interesting features... */
-	dev->features |= 0;
-
 	bp = netdev_priv(dev);
 	bp->pdev = pdev;
 	bp->dev = dev;
@@ -1897,19 +2162,33 @@ static int __init macb_probe(struct platform_device *pdev)
 
 	dev->base_addr = regs->start;
 
+	/* set up capabilities */
+	macb_configure_caps(bp);
+
 	/* setup appropriated routines according to adapter type */
 	if (macb_is_gem(bp)) {
+		bp->max_tx_length = GEM_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
 		bp->macbgem_ops.mog_rx = gem_rx;
 	} else {
+		bp->max_tx_length = MACB_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
 		bp->macbgem_ops.mog_rx = macb_rx;
 	}
 
+	/* Set features */
+	dev->hw_features = NETIF_F_SG;
+	/* Checksum offload is only available on gem with packet buffer */
+	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	if (bp->caps & MACB_CAPS_SG_DISABLED)
+		dev->hw_features &= ~NETIF_F_SG;
+	dev->features = dev->hw_features;
+
 	/* Set MII management clock divider */
 	config = macb_mdc_clk_div(bp);
 	config |= macb_dbw(bp);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 51c02442160a..517c09d72c4a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -164,6 +164,8 @@
 #define GEM_CLK_SIZE				3
 #define GEM_DBW_OFFSET				21
 #define GEM_DBW_SIZE				2
+#define GEM_RXCOEN_OFFSET			24
+#define GEM_RXCOEN_SIZE				1
 
 /* Constants for data bus width. */
 #define GEM_DBW32				0
@@ -305,6 +307,12 @@
 #define GEM_DBWDEF_OFFSET			25
 #define GEM_DBWDEF_SIZE				3
 
+/* Bitfields in DCFG2. */
+#define GEM_RX_PKT_BUFF_OFFSET			20
+#define GEM_RX_PKT_BUFF_SIZE			1
+#define GEM_TX_PKT_BUFF_OFFSET			21
+#define GEM_TX_PKT_BUFF_SIZE			1
+
 /* Constants for CLK */
 #define MACB_CLK_DIV8				0
 #define MACB_CLK_DIV16				1
@@ -326,7 +334,11 @@
 #define MACB_MAN_CODE				2
 
 /* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x1
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x00000001
+#define MACB_CAPS_FIFO_MODE			0x10000000
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
+#define MACB_CAPS_SG_DISABLED			0x40000000
+#define MACB_CAPS_MACB_IS_GEM			0x80000000
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
@@ -442,6 +454,14 @@ struct macb_dma_desc {
 #define MACB_RX_BROADCAST_OFFSET		31
 #define MACB_RX_BROADCAST_SIZE			1
 
+/* RX checksum offload disabled: bit 24 clear in NCFGR */
+#define GEM_RX_TYPEID_MATCH_OFFSET		22
+#define GEM_RX_TYPEID_MATCH_SIZE		2
+
+/* RX checksum offload enabled: bit 24 set in NCFGR */
+#define GEM_RX_CSUM_OFFSET			22
+#define GEM_RX_CSUM_SIZE			2
+
 #define MACB_TX_FRMLEN_OFFSET			0
 #define MACB_TX_FRMLEN_SIZE			11
 #define MACB_TX_LAST_OFFSET			15
@@ -459,14 +479,32 @@ struct macb_dma_desc {
 #define MACB_TX_USED_OFFSET			31
 #define MACB_TX_USED_SIZE			1
 
+#define GEM_TX_FRMLEN_OFFSET			0
+#define GEM_TX_FRMLEN_SIZE			14
+
+/* Buffer descriptor constants */
+#define GEM_RX_CSUM_NONE			0
+#define GEM_RX_CSUM_IP_ONLY			1
+#define GEM_RX_CSUM_IP_TCP			2
+#define GEM_RX_CSUM_IP_UDP			3
+
+/* limit RX checksum offload to TCP and UDP packets */
+#define GEM_RX_CSUM_CHECKED_MASK		2
+
 /**
  * struct macb_tx_skb - data about an skb which is being transmitted
- * @skb: skb currently being transmitted
- * @mapping: DMA address of the skb's data buffer
+ * @skb: skb currently being transmitted, only set for the last buffer
+ *       of the frame
+ * @mapping: DMA address of the skb's fragment buffer
+ * @size: size of the DMA mapped buffer
+ * @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
+ *                  false when buffer was mapped with dma_map_single()
  */
 struct macb_tx_skb {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
+	size_t			size;
+	bool			mapped_as_page;
 };
 
 /*
@@ -554,6 +592,11 @@ struct macb_or_gem_ops {
 	int	(*mog_rx)(struct macb *bp, int budget);
 };
 
+struct macb_config {
+	u32			caps;
+	unsigned int		dma_burst_length;
+};
+
 struct macb {
 	void __iomem		*regs;
 
@@ -595,6 +638,7 @@ struct macb {
 	unsigned int		duplex;
 
 	u32			caps;
+	unsigned int		dma_burst_length;
 
 	phy_interface_t		phy_interface;
 
@@ -602,6 +646,7 @@ struct macb {
 	struct sk_buff		*skb;		/* holds skb until xmit interrupt completes */
 	dma_addr_t		skb_physaddr;	/* phys addr from pci_map_single */
 	int			skb_length;	/* saved skb length for pci_unmap_single */
+	unsigned int		max_tx_length;
 };
 
 extern const struct ethtool_ops macb_ethtool_ops;
@@ -615,7 +660,7 @@ void macb_get_hwaddr(struct macb *bp);
 
 static inline bool macb_is_gem(struct macb *bp)
 {
-	return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+	return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
 #endif /* _MACB_H */
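[Editor's note: a standalone sketch, not part of the patch, of how the 2-bit
RX_CSUM descriptor field defined above drives the CHECKSUM_UNNECESSARY
decision in gem_rx(). The constants mirror the patch; the test harness itself
is hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	/* Values of the 2-bit RX_CSUM field in the RX buffer descriptor,
	 * mirroring the GEM_RX_CSUM_* constants added by this patch.
	 */
	enum gem_rx_csum {
		GEM_RX_CSUM_NONE    = 0,
		GEM_RX_CSUM_IP_ONLY = 1,
		GEM_RX_CSUM_IP_TCP  = 2,
		GEM_RX_CSUM_IP_UDP  = 3,
	};

	/* Bit set only for IP_TCP (2) and IP_UDP (3) */
	#define GEM_RX_CSUM_CHECKED_MASK 2

	int main(void)
	{
		unsigned int v;

		for (v = GEM_RX_CSUM_NONE; v <= GEM_RX_CSUM_IP_UDP; v++) {
			/* gem_rx() sets skb->ip_summed = CHECKSUM_UNNECESSARY
			 * only when this test is true, so an IP-only result
			 * never bypasses the stack's TCP/UDP verification.
			 */
			bool checked = v & GEM_RX_CSUM_CHECKED_MASK;

			printf("RX_CSUM=%u -> %s\n",
			       v, checked ? "checked" : "not checked");
		}
		return 0;
	}
]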