Diffstat (limited to 'net/core/sock.c')
-rw-r--r--    net/core/sock.c    45
1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 0b39e7ae4383..ab20ed9b0f31 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -475,12 +475,6 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
-void sk_reset_txq(struct sock *sk)
-{
-        sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
         struct dst_entry *dst = __sk_dst_get(sk);
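The removed sk_reset_txq() was a one-line exported wrapper around the sk_tx_queue_clear() inline from <net/sock.h>, so remaining callers can use the inline directly. A minimal sketch of that pattern; the function name below is hypothetical, not taken from this patch:

    /* Hypothetical caller: clear the cached TX queue mapping through the
     * static inline from <net/sock.h> instead of the removed export. */
    #include <net/sock.h>

    static void example_invalidate_txq(struct sock *sk)
    {
            sk_tx_queue_clear(sk);  /* same effect as the removed sk_reset_txq() */
    }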
@@ -914,6 +908,13 @@ set_rcvbuf:
                 }
                 break;
 #endif
+
+        case SO_MAX_PACING_RATE:
+                sk->sk_max_pacing_rate = val;
+                sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+                                         sk->sk_max_pacing_rate);
+                break;
+
         default:
                 ret = -ENOPROTOOPT;
                 break;
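The new option caps sk_pacing_rate from user space; the value is in bytes per second, and a pacing rate already above the new cap is clamped immediately by the min(). A hedged user-space sketch of setting the cap; the fallback define is an assumption for headers that predate this patch (the asm-generic socket option numbering):

    /* User-space sketch: cap pacing for one TCP socket. */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef SO_MAX_PACING_RATE
    #define SO_MAX_PACING_RATE 47  /* assumed asm-generic value on older headers */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            unsigned int rate = 5 * 1000 * 1000;    /* ~5 MB/s, bytes per second */

            if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                                     &rate, sizeof(rate)) < 0)
                    perror("SO_MAX_PACING_RATE");
            return 0;
    }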
@@ -1177,6 +1178,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                 break;
 #endif
 
+        case SO_MAX_PACING_RATE:
+                v.val = sk->sk_max_pacing_rate;
+                break;
+
         default:
                 return -ENOPROTOOPT;
         }
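sock_getsockopt() reports the same field back, so the cap can be read with getsockopt(). A short follow-up to the sketch above (same includes and the same hypothetical fallback define); ~0U means no cap has been configured:

    static unsigned int query_max_pacing_rate(int fd)
    {
            unsigned int rate = 0;
            socklen_t len = sizeof(rate);

            if (getsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, &len) < 0)
                    return 0;       /* treat failure as "unknown" */
            return rate;
    }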
@@ -1836,7 +1841,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER     get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less or equal than PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
         int order;
 
@@ -1845,16 +1860,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                         pfrag->offset = 0;
                         return true;
                 }
-                if (pfrag->offset < pfrag->size)
+                if (pfrag->offset + sz <= pfrag->size)
                         return true;
                 put_page(pfrag->page);
         }
 
         /* We restrict high order allocations to users that can afford to wait */
-        order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+        order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
         do {
-                gfp_t gfp = sk->sk_allocation;
+                gfp_t gfp = prio;
 
                 if (order)
                         gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                 }
         } while (--order >= 0);
 
+        return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+        if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+                return true;
+
         sk_enter_memory_pressure(sk);
         sk_stream_moderate_sndbuf(sk);
         return false;
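skb_page_frag_refill() is the socket-independent core of the old helper: it takes the gfp mask explicitly and only guarantees that @sz more bytes fit in the fragment, while sk_page_frag_refill() keeps the socket memory-pressure handling on top of a 32-byte minimum. A hedged in-kernel sketch of a non-socket caller, assuming the declaration is exposed via <linux/skbuff.h> as in the companion header change; the function name below is hypothetical:

    /* Hypothetical caller: append @len bytes of @data to a page_frag, letting
     * skb_page_frag_refill() (re)allocate the backing page when the current
     * one is too full. @len must be <= PAGE_SIZE, per the kerneldoc note. */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    static int example_copy_to_frag(struct page_frag *pfrag,
                                    const void *data, unsigned int len)
    {
            if (!skb_page_frag_refill(len, pfrag, GFP_KERNEL))
                    return -ENOMEM;

            memcpy(page_address(pfrag->page) + pfrag->offset, data, len);
            pfrag->offset += len;   /* consume the room the refill guaranteed */
            return 0;
    }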
@@ -2319,6 +2343,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         sk->sk_ll_usec = sysctl_net_busy_read;
 #endif
 
+        sk->sk_max_pacing_rate = ~0U;
         sk->sk_pacing_rate = ~0U;
         /*
          * Before updating sk_refcnt, we must commit prior changes to memory