 drivers/net/ethernet/marvell/mv643xx_eth.c | 76 +++++++++++++++++++----------
 drivers/net/ethernet/marvell/mvneta.c      | 46 ++++++++++++------
 2 files changed, 85 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c68ff5deba8c..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -185,6 +185,13 @@ static char mv643xx_eth_driver_version[] = "1.4";
 
 #define TSO_HEADER_SIZE 128
 
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+        ((addr >= txq->tso_hdrs_dma) && \
+         (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
 /*
  * RX/TX descriptors.
  */
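For context on the new constants: in this driver's software TSO path each segment can consume up to two descriptors (one pointing into the per-queue TSO header buffer, one for the payload slice), and the fragments of the original skb account for up to MAX_SKB_FRAGS more, which is where MV643XX_MAX_SKB_DESCS comes from. A standalone sketch of the arithmetic, assuming MAX_SKB_FRAGS is 17 as on 4 KiB-page kernels of this era:

    /* Worst-case descriptor math; MAX_SKB_FRAGS = 17 is an assumption. */
    #include <stdio.h>

    #define MAX_SKB_FRAGS         17
    #define MV643XX_MAX_TSO_SEGS  100
    #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

    int main(void)
    {
        /* Header + data descriptor per segment, plus the original frags. */
        printf("descs per worst-case TSO skb: %d\n", MV643XX_MAX_SKB_DESCS);
        return 0; /* prints 217 under these assumptions */
    }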
@@ -348,6 +355,9 @@ struct tx_queue {
         int tx_curr_desc;
         int tx_used_desc;
 
+        int tx_stop_threshold;
+        int tx_wake_threshold;
+
         char *tso_hdrs;
         dma_addr_t tso_hdrs_dma;
 
@@ -497,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 
         if (netif_tx_queue_stopped(nq)) {
                 __netif_tx_lock(nq, smp_processor_id());
-                if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+                if (txq->tx_desc_count <= txq->tx_wake_threshold)
                         netif_tx_wake_queue(nq);
                 __netif_tx_unlock(nq);
         }
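Together with txq_maybe_wake() above, the two thresholds set up in txq_init() further down give the queue hysteresis: it stops while descriptors are scarce and is only woken once reclaim has drained back to half the stop level, rather than as soon as a single worst-case skb fits, which avoids rapid stop/wake flapping. A minimal userspace sketch of the pattern; the struct, function names, and the 512/217 figures are illustrative, not taken from the driver:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the driver's per-queue bookkeeping. */
    struct txq_sim {
        int desc_count;      /* descriptors currently in use          */
        int stop_threshold;  /* stop the queue at or above this count */
        int wake_threshold;  /* wake it again at or below this count  */
        bool stopped;
    };

    static void submit(struct txq_sim *q, int descs)
    {
        q->desc_count += descs;
        if (q->desc_count >= q->stop_threshold)
            q->stopped = true;            /* netif_tx_stop_queue() */
    }

    static void reclaim(struct txq_sim *q, int descs)
    {
        q->desc_count -= descs;
        if (q->stopped && q->desc_count <= q->wake_threshold)
            q->stopped = false;           /* netif_tx_wake_queue() */
    }

    int main(void)
    {
        /* 512-entry ring, 217 descriptors per worst-case skb (assumed). */
        struct txq_sim q = { 0, 512 - 217, (512 - 217) / 2, false };

        submit(&q, 300);                  /* 300 >= 295 -> stopped */
        printf("stopped=%d\n", q.stopped);
        reclaim(&q, 180);                 /* 120 <= 147 -> woken   */
        printf("stopped=%d\n", q.stopped);
        return 0;
    }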
@@ -897,7 +907,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
         }
 }
 
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+                          struct net_device *dev)
 {
         struct mv643xx_eth_private *mp = txq_to_mp(txq);
         int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -910,11 +921,15 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
         cmd_sts = 0;
         l4i_chk = 0;
 
+        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+                if (net_ratelimit())
+                        netdev_err(dev, "tx queue full?!\n");
+                return -EBUSY;
+        }
+
         ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
-        if (ret) {
-                dev_kfree_skb_any(skb);
+        if (ret)
                 return ret;
-        }
         cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
         tx_index = txq->tx_curr_desc++;
@@ -967,36 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         nq = netdev_get_tx_queue(dev, queue);
 
         if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-                txq->tx_dropped++;
                 netdev_printk(KERN_DEBUG, dev,
                               "failed to linearize skb with tiny unaligned fragment\n");
                 return NETDEV_TX_BUSY;
         }
 
-        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-                if (net_ratelimit())
-                        netdev_err(dev, "tx queue full?!\n");
-                dev_kfree_skb_any(skb);
-                return NETDEV_TX_OK;
-        }
-
         length = skb->len;
 
         if (skb_is_gso(skb))
                 ret = txq_submit_tso(txq, skb, dev);
         else
-                ret = txq_submit_skb(txq, skb);
+                ret = txq_submit_skb(txq, skb, dev);
         if (!ret) {
-                int entries_left;
-
                 txq->tx_bytes += length;
                 txq->tx_packets++;
 
-                entries_left = txq->tx_ring_size - txq->tx_desc_count;
-                if (entries_left < MAX_SKB_FRAGS + 1)
+                if (txq->tx_desc_count >= txq->tx_stop_threshold)
                         netif_tx_stop_queue(nq);
-        } else if (ret == -EBUSY) {
-                return NETDEV_TX_BUSY;
+        } else {
+                txq->tx_dropped++;
+                dev_kfree_skb_any(skb);
         }
 
         return NETDEV_TX_OK;
@@ -1070,8 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                         mp->dev->stats.tx_errors++;
                 }
 
-                dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                 desc->byte_cnt, DMA_TO_DEVICE);
+                if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+                        dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+                                         desc->byte_cnt, DMA_TO_DEVICE);
                 dev_kfree_skb(skb);
         }
 
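The IS_TSO_HEADER() guard is needed because TSO headers are carved out of the queue's single tso_hdrs/tso_hdrs_dma allocation rather than being mapped per packet, so passing such an address to dma_unmap_single() would unmap memory that was never individually mapped. A standalone sketch of the range test, with the kernel types simplified for userspace:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;   /* stand-in for the kernel type */
    #define TSO_HEADER_SIZE 128

    /* True if addr points into the queue's contiguous TSO-header region,
     * i.e. it came from tso_hdrs_dma rather than dma_map_single(). */
    static bool is_tso_header(dma_addr_t addr, dma_addr_t tso_hdrs_dma,
                              int tx_ring_size)
    {
        return addr >= tso_hdrs_dma &&
               addr <  tso_hdrs_dma +
                       (dma_addr_t)tx_ring_size * TSO_HEADER_SIZE;
    }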
@@ -1614,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
                 return -EINVAL;
 
         mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
-        mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+        mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+                                   MV643XX_MAX_SKB_DESCS * 2, 4096);
+        if (mp->tx_ring_size != er->tx_pending)
+                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                            mp->tx_ring_size, er->tx_pending);
 
         if (netif_running(dev)) {
                 mv643xx_eth_stop(dev);
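clamp_t(type, val, lo, hi) evaluates its operands as the given type and bounds val to [lo, hi]; here it guarantees the ring can never be configured smaller than two worst-case skbs' worth of descriptors. A userspace equivalent; clamp_uint() is a hypothetical helper, and the 434 lower bound assumes the MV643XX_MAX_SKB_DESCS value of 217 worked out earlier:

    #include <stdio.h>

    /* Hypothetical userspace stand-in for the kernel's clamp_t(). */
    static unsigned int clamp_uint(unsigned int val, unsigned int lo,
                                   unsigned int hi)
    {
        return val < lo ? lo : (val > hi ? hi : val);
    }

    int main(void)
    {
        /* Lower bound 434 = MV643XX_MAX_SKB_DESCS * 2 (assumed). */
        printf("%u\n", clamp_uint(64,   434, 4096)); /* 434  (raised) */
        printf("%u\n", clamp_uint(512,  434, 4096)); /* 512  (kept)   */
        printf("%u\n", clamp_uint(8192, 434, 4096)); /* 4096 (capped) */
        return 0;
    }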
@@ -1990,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
         txq->tx_ring_size = mp->tx_ring_size;
 
+        /* A queue must always have room for at least one skb.
+         * Therefore, stop the queue when the number of free entries
+         * reaches the maximum number of descriptors per skb.
+         */
+        txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+        txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
         txq->tx_desc_count = 0;
         txq->tx_curr_desc = 0;
         txq->tx_used_desc = 0;
@@ -2849,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
                        struct mv643xx_eth_platform_data *pd)
 {
         struct net_device *dev = mp->dev;
+        unsigned int tx_ring_size;
 
         if (is_valid_ether_addr(pd->mac_addr))
                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2863,9 +2881,16 @@ static void set_params(struct mv643xx_eth_private *mp,
 
         mp->rxq_count = pd->rx_queue_count ? : 1;
 
-        mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+        tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
         if (pd->tx_queue_size)
-                mp->tx_ring_size = pd->tx_queue_size;
+                tx_ring_size = pd->tx_queue_size;
+
+        mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+                                   MV643XX_MAX_SKB_DESCS * 2, 4096);
+        if (mp->tx_ring_size != tx_ring_size)
+                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                            mp->tx_ring_size, tx_ring_size);
+
         mp->tx_desc_sram_addr = pd->tx_sram_addr;
         mp->tx_desc_sram_size = pd->tx_sram_size;
 
@@ -3092,6 +3117,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
         dev->hw_features = dev->features;
 
         dev->priv_flags |= IFF_UNICAST_FLT;
+        dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
 
         SET_NETDEV_DEV(dev, &pdev->dev);
 
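Setting dev->gso_max_segs is what makes the new bound enforceable end to end: the core stack will not hand the driver a GSO skb carrying more than MV643XX_MAX_TSO_SEGS TCP segments (it segments in software first), which in turn caps per-skb descriptor usage at MV643XX_MAX_SKB_DESCS and keeps the stop threshold meaningful.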
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8919fa6ed27..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -219,9 +219,6 @@
 #define MVNETA_RX_COAL_PKTS 32
 #define MVNETA_RX_COAL_USEC 100
 
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT 64
-
 /* The two bytes Marvell header. Either contains a special value used
  * by Marvell switches when a specific hardware mode is enabled (not
  * supported by this driver) or is filled automatically by zeroes on
@@ -254,6 +251,11 @@
 /* Max number of Tx descriptors */
 #define MVNETA_MAX_TXD 532
 
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
 /* descriptor aligned size */
 #define MVNETA_DESC_ALIGNED_SIZE 32
 
@@ -262,6 +264,10 @@
                               ETH_HLEN + ETH_FCS_LEN, \
                               MVNETA_CPU_D_CACHE_LINE_SIZE)
 
+#define IS_TSO_HEADER(txq, addr) \
+        ((addr >= txq->tso_hdrs_phys) && \
+         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
 struct mvneta_pcpu_stats {
@@ -391,6 +397,8 @@ struct mvneta_tx_queue {
          * descriptor ring
          */
         int count;
+        int tx_stop_threshold;
+        int tx_wake_threshold;
 
         /* Array of transmitted skb */
         struct sk_buff **tx_skb;
@@ -1287,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 
                 mvneta_txq_inc_get(txq);
 
+                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+                        dma_unmap_single(pp->dev->dev.parent,
+                                         tx_desc->buf_phys_addr,
+                                         tx_desc->data_size, DMA_TO_DEVICE);
                 if (!skb)
                         continue;
-
-                dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
-                                 tx_desc->data_size, DMA_TO_DEVICE);
                 dev_kfree_skb_any(skb);
         }
 }
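Note that this hunk reorders the loop as well as adding the guard: with TSO, a data descriptor can hold a DMA mapping without an attached skb, so the unmap now runs before the !skb test (which previously skipped it entirely), while header descriptors, which must never be unmapped, are filtered out by IS_TSO_HEADER().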
@@ -1312,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
         txq->count -= tx_done;
 
         if (netif_tx_queue_stopped(nq)) {
-                if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+                if (txq->count <= txq->tx_wake_threshold)
                         netif_tx_wake_queue(nq);
         }
 }
@@ -1639,7 +1648,7 @@ err_release:
          */
         for (i = desc_count - 1; i >= 0; i--) {
                 struct mvneta_tx_desc *tx_desc = txq->descs + i;
-                if (!(tx_desc->command & MVNETA_TXD_F_DESC))
+                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                         dma_unmap_single(pp->dev->dev.parent,
                                          tx_desc->buf_phys_addr,
                                          tx_desc->data_size,
@@ -1772,7 +1781,7 @@ out:
         txq->count += frags;
         mvneta_txq_pend_desc_add(pp, txq, frags);
 
-        if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+        if (txq->count >= txq->tx_stop_threshold)
                 netif_tx_stop_queue(nq);
 
         u64_stats_update_begin(&stats->syncp);
@@ -2211,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 {
         txq->size = pp->tx_ring_size;
 
+        /* A queue must always have room for at least one skb.
+         * Therefore, stop the queue when the number of free entries
+         * reaches the maximum number of descriptors per skb.
+         */
+        txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+        txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+
         /* Allocate memory for TX descriptors */
         txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                         txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2745,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
                 return -EINVAL;
         pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
                 ring->rx_pending : MVNETA_MAX_RXD;
-        pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
-                ring->tx_pending : MVNETA_MAX_TXD;
+
+        pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+                                   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+        if (pp->tx_ring_size != ring->tx_pending)
+                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                            pp->tx_ring_size, ring->tx_pending);
 
         if (netif_running(dev)) {
                 mvneta_stop(dev);
@@ -3025,12 +3046,13 @@ static int mvneta_probe(struct platform_device *pdev)
         if (dram_target_info)
                 mvneta_conf_mbus_windows(pp, dram_target_info);
 
-        netif_napi_add(dev, &pp->napi, mvneta_poll, MVNETA_RX_POLL_WEIGHT);
+        netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
 
         dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
         dev->hw_features |= dev->features;
         dev->vlan_features |= dev->features;
         dev->priv_flags |= IFF_UNICAST_FLT;
+        dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
 
         err = register_netdev(dev);
         if (err < 0) {
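The NAPI weight change is cosmetic: MVNETA_RX_POLL_WEIGHT was 64, and the generic NAPI_POLL_WEIGHT in netdevice.h is also 64, so polling behavior is unchanged; the driver simply stops duplicating a core constant.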
