diff options
author | Eric Dumazet <edumazet@google.com> | 2013-10-17 19:27:07 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-10-18 00:08:51 -0400 |
commit | 400dfd3ae899849b27d398ca7894e1b44430887f (patch) | |
tree | 5478cafc2fd175ea7f2307e4eda1f6a9a8618733 /net/core/sock.c | |
parent | baf785bacc9f840104fc2dfedd2b72b5cbb4e103 (diff) |
net: refactor sk_page_frag_refill()
While working on virtio_net new allocation strategy to increase
payload/truesize ratio, we found that refactoring sk_page_frag_refill()
was needed.
This patch splits sk_page_frag_refill() into two parts, adding
skb_page_frag_refill() which can be used without a socket.
While we are at it, add a minimum frag size of 32 for
sk_page_frag_refill()
Michael will either use netdev_alloc_frag() from softirq context,
or skb_page_frag_refill() from process context in refill_work()
(GFP_KERNEL allocations)
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Michael Dalton <mwdalton@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/sock.c')
-rw-r--r-- | net/core/sock.c | 27 |
1 file changed, 23 insertions, 4 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index fd6afa267475..440afdca1e8f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1847,7 +1847,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
1847 | /* On 32bit arches, an skb frag is limited to 2^15 */ | 1847 | /* On 32bit arches, an skb frag is limited to 2^15 */ |
1848 | #define SKB_FRAG_PAGE_ORDER get_order(32768) | 1848 | #define SKB_FRAG_PAGE_ORDER get_order(32768) |
1849 | 1849 | ||
1850 | bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) | 1850 | /** |
1851 | * skb_page_frag_refill - check that a page_frag contains enough room | ||
1852 | * @sz: minimum size of the fragment we want to get | ||
1853 | * @pfrag: pointer to page_frag | ||
1854 | * @prio: priority for memory allocation | ||
1855 | * | ||
1856 | * Note: While this allocator tries to use high order pages, there is | ||
1857 | * no guarantee that allocations succeed. Therefore, @sz MUST be | ||
1858 | * less or equal than PAGE_SIZE. | ||
1859 | */ | ||
1860 | bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio) | ||
1851 | { | 1861 | { |
1852 | int order; | 1862 | int order; |
1853 | 1863 | ||
@@ -1856,16 +1866,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1856 | pfrag->offset = 0; | 1866 | pfrag->offset = 0; |
1857 | return true; | 1867 | return true; |
1858 | } | 1868 | } |
1859 | if (pfrag->offset < pfrag->size) | 1869 | if (pfrag->offset + sz <= pfrag->size) |
1860 | return true; | 1870 | return true; |
1861 | put_page(pfrag->page); | 1871 | put_page(pfrag->page); |
1862 | } | 1872 | } |
1863 | 1873 | ||
1864 | /* We restrict high order allocations to users that can afford to wait */ | 1874 | /* We restrict high order allocations to users that can afford to wait */ |
1865 | order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0; | 1875 | order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0; |
1866 | 1876 | ||
1867 | do { | 1877 | do { |
1868 | gfp_t gfp = sk->sk_allocation; | 1878 | gfp_t gfp = prio; |
1869 | 1879 | ||
1870 | if (order) | 1880 | if (order) |
1871 | gfp |= __GFP_COMP | __GFP_NOWARN; | 1881 | gfp |= __GFP_COMP | __GFP_NOWARN; |
@@ -1877,6 +1887,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1877 | } | 1887 | } |
1878 | } while (--order >= 0); | 1888 | } while (--order >= 0); |
1879 | 1889 | ||
1890 | return false; | ||
1891 | } | ||
1892 | EXPORT_SYMBOL(skb_page_frag_refill); | ||
1893 | |||
1894 | bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) | ||
1895 | { | ||
1896 | if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) | ||
1897 | return true; | ||
1898 | |||
1880 | sk_enter_memory_pressure(sk); | 1899 | sk_enter_memory_pressure(sk); |
1881 | sk_stream_moderate_sndbuf(sk); | 1900 | sk_stream_moderate_sndbuf(sk); |
1882 | return false; | 1901 | return false; |