author		Lennert Buytenhek <buytenh@wantstofly.org>	2009-02-12 09:08:39 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-16 02:43:58 -0500
commit		eaf5d59092dbed853bfab956ce123293832998f5 (patch)
tree		dee0b172a682bdace7533302c12be28a012ffab2 /drivers/net/mv643xx_eth.c
parent		b8df184f88f06f985ae58248305ddc257dc016b8 (diff)
mv643xx_eth: implement Large Receive Offload
Controlled by a compile-time (Kconfig) option for now, since it
isn't a win in all cases.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	112
1 file changed, 111 insertions(+), 1 deletion(-)
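
The Kconfig knob mentioned in the commit message lives outside this file (the
diffstat above is filtered to mv643xx_eth.c). Since the code below guards
everything with CONFIG_MV643XX_ETH_LRO and builds on the generic inet_lro
helpers, the entry in drivers/net/Kconfig presumably looks something like the
following sketch; the exact wording is an assumption, not part of this diff:

	config MV643XX_ETH_LRO
		bool "Marvell 643XX ethernet LRO support"
		depends on MV643XX_ETH
		select INET_LRO
		help
		  Software Large Receive Offload for the mv643xx_eth RX
		  path, built on the generic inet_lro helpers.  Kept
		  optional since it is not a win for every workload.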
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a99e5d3f2e46..bb9693195242 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -53,6 +53,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/io.h>
 #include <linux/types.h>
+#include <linux/inet_lro.h>
 #include <asm/system.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
@@ -227,6 +228,12 @@ struct tx_desc {
 #define RX_ENABLE_INTERRUPT		0x20000000
 #define RX_FIRST_DESC			0x08000000
 #define RX_LAST_DESC			0x04000000
+#define RX_IP_HDR_OK			0x02000000
+#define RX_PKT_IS_IPV4			0x01000000
+#define RX_PKT_IS_ETHERNETV2		0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
+#define RX_PKT_IS_VLAN_TAGGED		0x00080000
 
 /* TX descriptor command */
 #define TX_ENABLE_INTERRUPT		0x00800000
@@ -324,6 +331,12 @@ struct mib_counters {
 	u32 late_collision;
 };
 
+struct lro_counters {
+	u32 lro_aggregated;
+	u32 lro_flushed;
+	u32 lro_no_desc;
+};
+
 struct rx_queue {
 	int index;
 
@@ -337,6 +350,11 @@ struct rx_queue {
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
+
+#ifdef CONFIG_MV643XX_ETH_LRO
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_arr[8];
+#endif
 };
 
 struct tx_queue {
@@ -372,6 +390,8 @@ struct mv643xx_eth_private {
 	spinlock_t mib_counters_lock;
 	struct mib_counters mib_counters;
 
+	struct lro_counters lro_counters;
+
 	struct work_struct tx_timeout_task;
 
 	struct napi_struct napi;
@@ -496,12 +516,42 @@ static void txq_maybe_wake(struct tx_queue *txq)
 
 
 /* rx napi ******************************************************************/
+#ifdef CONFIG_MV643XX_ETH_LRO
+static int
+mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
+		       u64 *hdr_flags, void *priv)
+{
+	unsigned long cmd_sts = (unsigned long)priv;
+
+	/*
+	 * Make sure that this packet is Ethernet II, is not VLAN
+	 * tagged, is IPv4, has a valid IP header, and is TCP.
+	 */
+	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+			RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
+			RX_PKT_IS_VLAN_TAGGED)) !=
+	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
+		return -1;
+
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, ip_hdrlen(skb));
+	*iphdr = ip_hdr(skb);
+	*tcph = tcp_hdr(skb);
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+
+	return 0;
+}
+#endif
+
 static int rxq_process(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	struct net_device_stats *stats = &mp->dev->stats;
+	int lro_flush_needed;
 	int rx;
 
+	lro_flush_needed = 0;
 	rx = 0;
 	while (rx < budget && rxq->rx_desc_count) {
 		struct rx_desc *rx_desc;
@@ -561,7 +611,15 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		if (cmd_sts & LAYER_4_CHECKSUM_OK)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		skb->protocol = eth_type_trans(skb, mp->dev);
-		netif_receive_skb(skb);
+
+#ifdef CONFIG_MV643XX_ETH_LRO
+		if (skb->dev->features & NETIF_F_LRO &&
+		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
+			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
+			lro_flush_needed = 1;
+		} else
+#endif
+		netif_receive_skb(skb);
 
 		continue;
 
@@ -582,6 +640,11 @@ err:
 		dev_kfree_skb(skb);
 	}
 
+#ifdef CONFIG_MV643XX_ETH_LRO
+	if (lro_flush_needed)
+		lro_flush_all(&rxq->lro_mgr);
+#endif
+
 	if (rx < budget)
 		mp->work_rx &= ~(1 << rxq->index);
 
@@ -1161,6 +1224,28 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
 	return stats;
 }
 
+static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
+{
+	u32 lro_aggregated = 0;
+	u32 lro_flushed = 0;
+	u32 lro_no_desc = 0;
+	int i;
+
+#ifdef CONFIG_MV643XX_ETH_LRO
+	for (i = 0; i < mp->rxq_count; i++) {
+		struct rx_queue *rxq = mp->rxq + i;
+
+		lro_aggregated += rxq->lro_mgr.stats.aggregated;
+		lro_flushed += rxq->lro_mgr.stats.flushed;
+		lro_no_desc += rxq->lro_mgr.stats.no_desc;
+	}
+#endif
+
+	mp->lro_counters.lro_aggregated = lro_aggregated;
+	mp->lro_counters.lro_flushed = lro_flushed;
+	mp->lro_counters.lro_no_desc = lro_no_desc;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1319,6 +1404,10 @@ struct mv643xx_eth_stats {
 	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
 	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
 
+#define LROSTAT(m)						\
+	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
+	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
+
 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
 	SSTAT(rx_packets),
 	SSTAT(tx_packets),
@@ -1358,6 +1447,9 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
 	MIBSTAT(bad_crc_event),
 	MIBSTAT(collision),
 	MIBSTAT(late_collision),
+	LROSTAT(lro_aggregated),
+	LROSTAT(lro_flushed),
+	LROSTAT(lro_no_desc),
 };
 
 static int
@@ -1569,6 +1661,7 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
 
 	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
+	mv643xx_eth_grab_lro_stats(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
 		const struct mv643xx_eth_stats *stat;
@@ -1610,6 +1703,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
 	.set_sg			= ethtool_op_set_sg,
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
+	.get_flags		= ethtool_op_get_flags,
+	.set_flags		= ethtool_op_set_flags,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
 };
 
@@ -1844,6 +1939,21 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 					nexti * sizeof(struct rx_desc);
 	}
 
+#ifdef CONFIG_MV643XX_ETH_LRO
+	rxq->lro_mgr.dev = mp->dev;
+	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
+	rxq->lro_mgr.features = LRO_F_NAPI;
+	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
+	rxq->lro_mgr.max_aggr = 32;
+	rxq->lro_mgr.frag_align_pad = 0;
+	rxq->lro_mgr.lro_arr = rxq->lro_arr;
+	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
+
+	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
+#endif
+
 	return 0;
 
 
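
For reference, the inet_lro usage pattern scattered across the hunks above
boils down to the condensed sketch below. The my_* names are illustrative,
while struct net_lro_mgr, lro_receive_skb() and lro_flush_all() are the real
<linux/inet_lro.h> API this patch exercises:

	/*
	 * Sketch of the per-queue inet_lro receive pattern; my_* names
	 * are illustrative and not part of the driver.
	 */
	#include <linux/inet_lro.h>
	#include <linux/netdevice.h>

	struct my_rx_queue {
		struct net_lro_mgr lro_mgr;
		struct net_lro_desc lro_arr[8];	/* one per in-flight flow */
	};

	/* Per-packet step: aggregate eligible frames, pass the rest up. */
	static void my_rx_one(struct my_rx_queue *q, struct sk_buff *skb,
			      u32 cmd_sts, int *lro_flush_needed)
	{
		if ((skb->dev->features & NETIF_F_LRO) &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			/* priv cookie reappears in the get_skb_header hook */
			lro_receive_skb(&q->lro_mgr, skb,
					(void *)(unsigned long)cmd_sts);
			*lro_flush_needed = 1;
		} else {
			netif_receive_skb(skb);
		}
	}

	/* End of the poll pass: push out partially aggregated sessions. */
	static void my_rx_done(struct my_rx_queue *q, int lro_flush_needed)
	{
		if (lro_flush_needed)
			lro_flush_all(&q->lro_mgr);
	}

The lro_flush_needed flag simply avoids a pointless lro_flush_all() call on
poll passes where nothing was handed to the LRO manager; flushing once per
NAPI pass also bounds the latency that aggregation can add.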