author	Eric Dumazet <edumazet@google.com>	2015-04-22 10:33:36 -0400
committer	David S. Miller <davem@davemloft.net>	2015-04-22 16:24:59 -0400
commit	79930f5892e134c6da1254389577fffb8bd72c66 (patch)
tree	6a6b4cf89527383ae541fdd797d90dfc2e07c69f /net/core
parent	26349c71b4323e62f8de0fe45f2f06c4df535b9b (diff)
net: do not deplete pfmemalloc reserve
build_skb() should look at the page's pfmemalloc status.
If it is set, the page allocator allocated this page in the
expectation that doing so would help free other pages. The
networking stack can honor that only if skb->pfmemalloc is
also set.

Also, we must refrain from using high-order pages from the
pfmemalloc reserve, so __page_frag_refill() must also use
__GFP_NOMEMALLOC for them. Under memory pressure, using
order-0 pages is probably the best strategy.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d1967dab9cc6..456ead534e10 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -311,7 +311,11 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
-	skb->head_frag = frag_size != 0;
+	if (frag_size) {
+		skb->head_frag = 1;
+		if (virt_to_head_page(data)->pfmemalloc)
+			skb->pfmemalloc = 1;
+	}
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
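
The hunk above propagates the page allocator's pfmemalloc status into the skb. Below is a minimal, self-contained userspace sketch of that propagation logic, not kernel code: struct fake_page, struct fake_skb and model_build_skb() are hypothetical stand-ins for the kernel's struct page, struct sk_buff, virt_to_head_page() and build_skb().

#include <stdbool.h>
#include <stdio.h>

/* Trimmed stand-in for struct page. */
struct fake_page {
	bool pfmemalloc;	/* page was handed out from the emergency reserve */
};

/* Trimmed stand-in for struct sk_buff. */
struct fake_skb {
	bool head_frag;
	bool pfmemalloc;	/* skb may only serve memory-freeing work */
};

/* Models the build_skb() change; the caller passes the head page
 * directly instead of deriving it via virt_to_head_page(data). */
static void model_build_skb(struct fake_skb *skb,
			    const struct fake_page *head_page,
			    unsigned int frag_size)
{
	if (frag_size) {
		skb->head_frag = true;
		/* Propagate reserve status so the stack can drop this skb
		 * early unless the receiving socket is allowed to consume
		 * pfmemalloc memory (e.g. swap over network). */
		if (head_page->pfmemalloc)
			skb->pfmemalloc = true;
	}
}

int main(void)
{
	struct fake_page page = { .pfmemalloc = true };
	struct fake_skb skb = { 0 };

	model_build_skb(&skb, &page, 2048);
	printf("skb.pfmemalloc = %d\n", skb.pfmemalloc);	/* prints 1 */
	return 0;
}
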
@@ -348,7 +352,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
 	gfp_t gfp = gfp_mask;
 
 	if (order) {
-		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+			    __GFP_NOMEMALLOC;
 		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
 		nc->frag.size = PAGE_SIZE << (page ? order : 0);
 	}
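
The second hunk keeps opportunistic high-order refills out of the emergency reserve while leaving the order-0 fallback free to use it. Below is a userspace sketch of that policy under stated assumptions: the FAKE_GFP_* flags, try_alloc() and refill() are illustrative stand-ins, not the kernel's gfp flags or __page_frag_refill() itself.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_GFP_COMP		0x1u
#define FAKE_GFP_NOWARN		0x2u
#define FAKE_GFP_NORETRY	0x4u
#define FAKE_GFP_NOMEMALLOC	0x8u

/* Pretend allocator: under memory pressure only the emergency reserve
 * has pages left, so an attempt succeeds only if it may dip into it. */
static bool try_alloc(unsigned int gfp, bool pressure)
{
	if (!pressure)
		return true;
	return !(gfp & FAKE_GFP_NOMEMALLOC);
}

/* Returns the order actually allocated, or -1 on failure. */
static int refill(unsigned int gfp_mask, unsigned int max_order, bool pressure)
{
	if (max_order) {
		/* High-order try: never allowed to drain the reserve. */
		unsigned int high_gfp = gfp_mask | FAKE_GFP_COMP |
					FAKE_GFP_NOWARN | FAKE_GFP_NORETRY |
					FAKE_GFP_NOMEMALLOC;
		if (try_alloc(high_gfp, pressure))
			return (int)max_order;
	}
	/* Order-0 fallback keeps the caller's mask and may use the
	 * reserve; build_skb() then flags the skb as pfmemalloc. */
	if (try_alloc(gfp_mask, pressure))
		return 0;
	return -1;
}

int main(void)
{
	printf("order under pressure: %d\n", refill(0, 3, true));	/* 0 */
	printf("order when idle:      %d\n", refill(0, 3, false));	/* 3 */
	return 0;
}
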