about summary refs log tree commit diff stats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorGiuseppe CAVALLARO <peppe.cavallaro@st.com>2011-10-17 21:39:55 -0400
committerDavid S. Miller <davem@davemloft.net>2011-10-19 19:24:18 -0400
commit45db81e1590c82ddc735ccd33f8adab02528b3e3 (patch)
treec3438feab991a27b0d1a696c8118d5d28f414e03 /drivers/net/ethernet
parent286a837217204b1ef105e3a554d0757e4fdfaac1 (diff)
stmmac: limit max_mtu in case of 4KiB and use __netdev_alloc_skb (V2)
The problem with using a big MTU around 4096 bytes is that you end up allocating (4096 + NET_SKB_PAD + NET_IP_ALIGN + sizeof(struct skb_shared_info)) bytes -> 8192 bytes : order-1 pages. It's better to limit the MTU to SKB_MAX_HEAD(NET_SKB_PAD), to have no more than one page per skb. The patch also changes the netdev_alloc_skb_ip_align() done in init_dma_desc_rings() and uses a variant allowing GFP_KERNEL allocations, allowing the driver to load even in case of memory pressure. Reported-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
1 file changed, 4 insertions, 2 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5eccd996cde..aeaa15b451d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -474,11 +474,13 @@ static void init_dma_desc_rings(struct net_device *dev)
474 for (i = 0; i < rxsize; i++) { 474 for (i = 0; i < rxsize; i++) {
475 struct dma_desc *p = priv->dma_rx + i; 475 struct dma_desc *p = priv->dma_rx + i;
476 476
477 skb = netdev_alloc_skb_ip_align(dev, bfsize); 477 skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
478 GFP_KERNEL);
478 if (unlikely(skb == NULL)) { 479 if (unlikely(skb == NULL)) {
479 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 480 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
480 break; 481 break;
481 } 482 }
483 skb_reserve(skb, NET_IP_ALIGN);
482 priv->rx_skbuff[i] = skb; 484 priv->rx_skbuff[i] = skb;
483 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, 485 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
484 bfsize, DMA_FROM_DEVICE); 486 bfsize, DMA_FROM_DEVICE);
@@ -1401,7 +1403,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1401 if (priv->plat->enh_desc) 1403 if (priv->plat->enh_desc)
1402 max_mtu = JUMBO_LEN; 1404 max_mtu = JUMBO_LEN;
1403 else 1405 else
1404 max_mtu = BUF_SIZE_4KiB; 1406 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
1405 1407
1406 if ((new_mtu < 46) || (new_mtu > max_mtu)) { 1408 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
1407 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); 1409 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);