Diffstat (limited to 'net/ipv4/tcp.c')

-rw-r--r--	net/ipv4/tcp.c	65
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2e..5901010fad55 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
 
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
-				struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 {
 	if (flags & MSG_OOB)
 		tp->snd_up = tp->write_seq;
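The skb argument was unused, so tcp_mark_urg() now takes only the flags. What feeds this path is a send() with MSG_OOB set; a minimal userspace sketch (hypothetical helper name, not part of this patch):

/* Hypothetical userspace helper: send one byte of urgent
 * (out-of-band) data. The MSG_OOB flag is what makes tcp_mark_urg()
 * advance tp->snd_up to tp->write_seq on the kernel side. */
#include <sys/socket.h>

static ssize_t send_urgent_byte(int fd)
{
	char c = '!';

	return send(fd, &c, 1, MSG_OOB);
}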
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
 			    int nonagle)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
 	if (tcp_send_head(sk)) {
-		struct sk_buff *skb = tcp_write_queue_tail(sk);
+		struct tcp_sock *tp = tcp_sk(sk);
+
 		if (!(flags & MSG_MORE) || forced_push(tp))
-			tcp_mark_push(tp, skb);
-		tcp_mark_urg(tp, flags, skb);
+			tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+		tcp_mark_urg(tp, flags);
 		__tcp_push_pending_frames(sk, mss_now,
 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
 	}
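tcp_push() above maps MSG_MORE onto TCP_NAGLE_CORK, so a partial frame is held back while the caller has promised more data. A minimal userspace sketch of that flag (hypothetical helper name, not part of this patch):

/* Hypothetical userspace helper: MSG_MORE on the first send() lets
 * tcp_push() cork the partial frame (TCP_NAGLE_CORK above); the
 * second send() without it pushes header and body together. */
#include <sys/socket.h>

static void send_hdr_then_body(int fd, const void *hdr, size_t hlen,
			       const void *body, size_t blen)
{
	send(fd, hdr, hlen, MSG_MORE);	/* queued, not pushed yet */
	send(fd, body, blen, 0);	/* pushes the coalesced data */
}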
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
-	if (sk->sk_route_caps & NETIF_F_SG) {
+	if (sg) {
 		if (sk_can_gso(sk))
 			tmp = 0;
 		else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	struct sk_buff *skb;
 	int iovlen, flags;
 	int mss_now, size_goal;
-	int err, copied;
+	int sg, err, copied;
 	long timeo;
 
 	lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto out_err;
 
+	sg = sk->sk_route_caps & NETIF_F_SG;
+
 	while (--iovlen >= 0) {
 		int seglen = iov->iov_len;
 		unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
 
-			skb = sk_stream_alloc_skb(sk, select_size(sk),
-					sk->sk_allocation);
+			skb = sk_stream_alloc_skb(sk,
+						  select_size(sk, sg),
+						  sk->sk_allocation);
 			if (!skb)
 				goto wait_for_memory;
 
@@ -997,9 +999,7 @@ new_segment:
 				/* We can extend the last page
 				 * fragment. */
 				merge = 1;
-			} else if (i == MAX_SKB_FRAGS ||
-				   (!i &&
-				   !(sk->sk_route_caps & NETIF_F_SG))) {
+			} else if (i == MAX_SKB_FRAGS || !sg) {
 				/* Need to add new fragment and cannot
 				 * do this because interface is non-SG,
 				 * or because all the page slots are
@@ -2229,6 +2229,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		}
 		break;
 
+	case TCP_THIN_LINEAR_TIMEOUTS:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->thin_lto = val;
+		break;
+
+	case TCP_THIN_DUPACK:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->thin_dupack = val;
+		break;
+
 	case TCP_CORK:
 		/* When set indicates to always queue non-full frames.
 		 * Later the user clears this option and we transmit
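Both new options accept a boolean value. A minimal userspace sketch of enabling them (hypothetical helper name; assumes TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK are defined by the installed headers):

/* Hypothetical userspace helper: enable both thin-stream options.
 * Either setsockopt() fails with ENOPROTOOPT on kernels that do not
 * support them, so callers can fall back gracefully. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_thin_stream(int fd)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
			  &one, sizeof(one));
}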
@@ -2788,10 +2802,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 {
 	int cpu;
 	for_each_possible_cpu(cpu) {
@@ -2808,7 +2822,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 
 void tcp_free_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool **pool = NULL;
+	struct tcp_md5sig_pool * __percpu *pool = NULL;
 
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2836,11 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
+static struct tcp_md5sig_pool * __percpu *
+__tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
-	struct tcp_md5sig_pool **pool;
+	struct tcp_md5sig_pool * __percpu *pool;
 
 	pool = alloc_percpu(struct tcp_md5sig_pool *);
 	if (!pool)
@@ -2852,9 +2867,9 @@ out_free:
 	return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
-	struct tcp_md5sig_pool **pool;
+	struct tcp_md5sig_pool * __percpu *pool;
 	int alloc = 0;
 
 retry:
@@ -2873,7 +2888,9 @@ retry:
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
+		struct tcp_md5sig_pool * __percpu *p;
+
+		p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
@@ -2897,7 +2914,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
-	struct tcp_md5sig_pool **p;
+	struct tcp_md5sig_pool * __percpu *p;
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
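The __percpu hunks only add sparse annotations describing the existing per-cpu array of pointers; no generated code changes. A minimal sketch of that allocation pattern under an assumed example type struct foo (not part of this patch):

/* Sketch of the annotated pattern with an assumed example type:
 * alloc_percpu() gives one zeroed pointer slot per possible CPU, and
 * __percpu tells sparse the result may only be dereferenced through
 * per_cpu_ptr()/this_cpu_ptr(). */
#include <linux/percpu.h>
#include <linux/slab.h>

struct foo { int val; };	/* stands in for struct tcp_md5sig_pool */

static struct foo * __percpu *alloc_foo_pool(void)
{
	struct foo * __percpu *pool;
	int cpu;

	pool = alloc_percpu(struct foo *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = f;
	}
	return pool;

out_free:
	for_each_possible_cpu(cpu)
		kfree(*per_cpu_ptr(pool, cpu));	/* kfree(NULL) is a no-op */
	free_percpu(pool);
	return NULL;
}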