author     Eric Dumazet <edumazet@google.com>      2014-09-17 07:49:49 -0400
committer  David S. Miller <davem@davemloft.net>   2014-09-19 16:25:23 -0400
commit     2e4e44107176d552f8bb1bb76053e850e3809841
tree       b5cbc8a9e0a02bd3973e918e67493b9d81ffae4a /net/core
parent     cb93471acc42b71fa3f2e46805020f2b323db64f
net: add alloc_skb_with_frags() helper
Extract the code that builds an skb with frags out of sock_alloc_send_pskb(),
so that it can be reused in other contexts.

The intent is to use it from tcp_send_rcvq(), tcp_collapse(), ...

We also want to replace some skb_linearize() calls with a more reliable
strategy in pathological cases where the number of frags must be reduced.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
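
For illustration, a minimal sketch of how a caller might use the new helper, assuming a 128-byte linear part for protocol headers, a 64 KB payload carried in page frags, and GFP_KERNEL (all of these values are illustrative, not taken from this patch):

	int err;
	struct sk_buff *skb;

	/* Try order-3 (32 KB) blocks first; the helper steps down to any
	 * order that still fits, and to order-0 pages after one failure.
	 */
	skb = alloc_skb_with_frags(128, 64 * 1024, 3, &err, GFP_KERNEL);
	if (!skb)
		return err;	/* -EMSGSIZE or -ENOBUFS, set by the helper */

Since GFP_KERNEL contains __GFP_WAIT, the linear part would additionally be allocated with __GFP_REPEAT, per the code below.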
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/skbuff.c   78
-rw-r--r--   net/core/sock.c     78
2 files changed, 93 insertions(+), 63 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 29f7f0121491..06a8feb10099 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4102,3 +4102,81 @@ err_free:
 	return NULL;
 }
 EXPORT_SYMBOL(skb_vlan_untag);
+
+/**
+ * alloc_skb_with_frags - allocate skb with page frags
+ *
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
+ *
+ * This can be used to allocate a paged skb, given a maximal order for frags.
+ */
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask)
+{
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	unsigned long chunk;
+	struct sk_buff *skb;
+	struct page *page;
+	gfp_t gfp_head;
+	int i;
+
+	*errcode = -EMSGSIZE;
+	/* Note this test could be relaxed, if we succeed to allocate
+	 * high order pages...
+	 */
+	if (npages > MAX_SKB_FRAGS)
+		return NULL;
+
+	gfp_head = gfp_mask;
+	if (gfp_head & __GFP_WAIT)
+		gfp_head |= __GFP_REPEAT;
+
+	*errcode = -ENOBUFS;
+	skb = alloc_skb(header_len, gfp_head);
+	if (!skb)
+		return NULL;
+
+	skb->truesize += npages << PAGE_SHIFT;
+
+	for (i = 0; npages > 0; i++) {
+		int order = max_page_order;
+
+		while (order) {
+			if (npages >= 1 << order) {
+				page = alloc_pages(gfp_mask |
+						   __GFP_COMP |
+						   __GFP_NOWARN |
+						   __GFP_NORETRY,
+						   order);
+				if (page)
+					goto fill_page;
+				/* Do not retry other high order allocations */
+				order = 1;
+				max_page_order = 0;
+			}
+			order--;
+		}
+		page = alloc_page(gfp_mask);
+		if (!page)
+			goto failure;
+fill_page:
+		chunk = min_t(unsigned long, data_len,
+			      PAGE_SIZE << order);
+		skb_fill_page_desc(skb, i, page, 0, chunk);
+		data_len -= chunk;
+		npages -= 1 << order;
+	}
+	return skb;
+
+failure:
+	kfree_skb(skb);
+	return NULL;
+}
+EXPORT_SYMBOL(alloc_skb_with_frags);
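
The sizing loop above favors high-order pages but degrades gracefully: it picks the largest order that still fits the remaining npages, and after a single high-order allocation failure it clears max_page_order so every remaining frag comes from order-0 pages. Below is a standalone userspace sketch of just the chunk-carving arithmetic, with the allocation-failure fallback omitted; PAGE_SHIFT and the inputs are illustrative, and no kernel API is used:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long data_len = 10000;	/* bytes requested in frags */
		int npages = (data_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		int max_page_order = 2;		/* pretend order-2 is allowed */
		int i;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;

			/* Pick the largest order that still fits npages; the
			 * kernel code would also step down here if the
			 * high-order allocation itself failed.
			 */
			while (order && npages < (1 << order))
				order--;

			unsigned long chunk = data_len < (PAGE_SIZE << order)
					    ? data_len : (PAGE_SIZE << order);
			printf("frag %d: order %d, %lu bytes\n", i, order, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
		return 0;
	}

For data_len = 10000 this prints two frags (one order-1 chunk of 8192 bytes, one order-0 chunk of 1808 bytes) where an order-0-only strategy would have used three.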
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f436b5e4961..de887c45c63b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1762,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order)
 {
-	struct sk_buff *skb = NULL;
-	unsigned long chunk;
-	gfp_t gfp_mask;
+	struct sk_buff *skb;
 	long timeo;
 	int err;
-	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	struct page *page;
-	int i;
-
-	err = -EMSGSIZE;
-	if (npages > MAX_SKB_FRAGS)
-		goto failure;
 
 	timeo = sock_sndtimeo(sk, noblock);
-	while (!skb) {
+	for (;;) {
 		err = sock_error(sk);
 		if (err != 0)
 			goto failure;
@@ -1785,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			err = -EAGAIN;
-			if (!timeo)
-				goto failure;
-			if (signal_pending(current))
-				goto interrupted;
-			timeo = sock_wait_for_wmem(sk, timeo);
-			continue;
-		}
-
-		err = -ENOBUFS;
-		gfp_mask = sk->sk_allocation;
-		if (gfp_mask & __GFP_WAIT)
-			gfp_mask |= __GFP_REPEAT;
+		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+			break;
 
-		skb = alloc_skb(header_len, gfp_mask);
-		if (!skb)
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		err = -EAGAIN;
+		if (!timeo)
 			goto failure;
-
-		skb->truesize += data_len;
-
-		for (i = 0; npages > 0; i++) {
-			int order = max_page_order;
-
-			while (order) {
-				if (npages >= 1 << order) {
-					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP |
-							   __GFP_NOWARN |
-							   __GFP_NORETRY,
-							   order);
-					if (page)
-						goto fill_page;
-					/* Do not retry other high order allocations */
-					order = 1;
-					max_page_order = 0;
-				}
-				order--;
-			}
-			page = alloc_page(sk->sk_allocation);
-			if (!page)
-				goto failure;
-fill_page:
-			chunk = min_t(unsigned long, data_len,
-				      PAGE_SIZE << order);
-			skb_fill_page_desc(skb, i, page, 0, chunk);
-			data_len -= chunk;
-			npages -= 1 << order;
-		}
+		if (signal_pending(current))
+			goto interrupted;
+		timeo = sock_wait_for_wmem(sk, timeo);
 	}
-
-	skb_set_owner_w(skb, sk);
+	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+				   errcode, sk->sk_allocation);
+	if (skb)
+		skb_set_owner_w(skb, sk);
 	return skb;
 
 interrupted:
 	err = sock_intr_errno(timeo);
 failure:
-	kfree_skb(skb);
 	*errcode = err;
 	return NULL;
 }
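
For context, the plain sock_alloc_send_skb() wrapper in this file needs no change: it already funnels into sock_alloc_send_pskb() with no frag payload at all, so it ends up in the new helper's trivial path. Roughly, from the surrounding kernel source of this era rather than from the diff above:

	struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
					    int noblock, int *errcode)
	{
		return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
	}

With data_len == 0, npages computes to 0, the frag loop never runs, and alloc_skb_with_frags() behaves like a bare alloc_skb() of the linear part.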