aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/tcp.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-03-19 13:05:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-19 13:05:34 -0400
commit1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe (patch)
tree552e03de245cdbd0780ca1215914edc4a26540f7 /include/net/tcp.h
parent6b5f04b6cf8ebab9a65d9c0026c650bb2538fd0f (diff)
parentfe30937b65354c7fec244caebbdaae68e28ca797 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: "Highlights: 1) Support more Realtek wireless chips, from Jes Sorenson. 2) New BPF types for per-cpu hash and array maps, from Alexei Starovoitov. 3) Make several TCP sysctls per-namespace, from Nikolay Borisov. 4) Allow the use of SO_REUSEPORT in order to do per-thread processing of incoming TCP/UDP connections. The muxing can be done using a BPF program which hashes the incoming packet. From Craig Gallek. 5) Add a multiplexer for TCP streams, to provide a message-based interface. BPF programs can be used to determine the message boundaries. From Tom Herbert. 6) Add 802.1AE MACSEC support, from Sabrina Dubroca. 7) Avoid factorial complexity when taking down an inetdev interface with lots of configured addresses. We were doing things like traversing the entire address list for each address removed, and flushing the entire netfilter conntrack table for every address as well. 8) Add and use SKB bulk free infrastructure, from Jesper Brouer. 9) Allow offloading u32 classifiers to hardware, and implement for ixgbe, from John Fastabend. 10) Allow configuring IRQ coalescing parameters on a per-queue basis, from Kan Liang. 11) Extend ethtool so that larger link mode masks can be supported. From David Decotigny. 12) Introduce devlink, which can be used to configure port link types (ethernet vs Infiniband, etc.), port splitting, and switch device level attributes as a whole. From Jiri Pirko. 13) Hardware offload support for flower classifiers, from Amir Vadai. 14) Add "Local Checksum Offload". Basically, for a tunneled packet the checksum of the outer header is 'constant' (because with the checksum field filled into the inner protocol header, the payload of the outer frame checksums to 'zero'), and we can take advantage of that in various ways. 
From Edward Cree" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1548 commits) bonding: fix bond_get_stats() net: bcmgenet: fix dma api length mismatch net/mlx4_core: Fix backward compatibility on VFs phy: mdio-thunder: Fix some Kconfig typos lan78xx: add ndo_get_stats64 lan78xx: handle statistics counter rollover RDS: TCP: Remove unused constant RDS: TCP: Add sysctl tunables for sndbuf/rcvbuf on rds-tcp socket net: smc911x: convert pxa dma to dmaengine team: remove duplicate set of flag IFF_MULTICAST bonding: remove duplicate set of flag IFF_MULTICAST net: fix a comment typo ethernet: micrel: fix some error codes ip_tunnels, bpf: define IP_TUNNEL_OPTS_MAX and use it bpf, dst: add and use dst_tclassid helper bpf: make skb->tc_classid also readable net: mvneta: bm: clarify dependencies cls_bpf: reset class and reuse major in da ldmvsw: Checkpatch sunvnet.c and sunvnet_common.c ldmvsw: Add ldmvsw.c driver code ...
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--include/net/tcp.h53
1 files changed, 42 insertions, 11 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b04bc989ad6c..b91370f61be6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -238,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row;
238extern int sysctl_tcp_timestamps; 238extern int sysctl_tcp_timestamps;
239extern int sysctl_tcp_window_scaling; 239extern int sysctl_tcp_window_scaling;
240extern int sysctl_tcp_sack; 240extern int sysctl_tcp_sack;
241extern int sysctl_tcp_fin_timeout;
242extern int sysctl_tcp_syn_retries;
243extern int sysctl_tcp_synack_retries;
244extern int sysctl_tcp_retries1;
245extern int sysctl_tcp_retries2;
246extern int sysctl_tcp_orphan_retries;
247extern int sysctl_tcp_syncookies;
248extern int sysctl_tcp_fastopen; 241extern int sysctl_tcp_fastopen;
249extern int sysctl_tcp_retrans_collapse; 242extern int sysctl_tcp_retrans_collapse;
250extern int sysctl_tcp_stdurg; 243extern int sysctl_tcp_stdurg;
@@ -273,7 +266,6 @@ extern int sysctl_tcp_thin_dupack;
273extern int sysctl_tcp_early_retrans; 266extern int sysctl_tcp_early_retrans;
274extern int sysctl_tcp_limit_output_bytes; 267extern int sysctl_tcp_limit_output_bytes;
275extern int sysctl_tcp_challenge_ack_limit; 268extern int sysctl_tcp_challenge_ack_limit;
276extern unsigned int sysctl_tcp_notsent_lowat;
277extern int sysctl_tcp_min_tso_segs; 269extern int sysctl_tcp_min_tso_segs;
278extern int sysctl_tcp_min_rtt_wlen; 270extern int sysctl_tcp_min_rtt_wlen;
279extern int sysctl_tcp_autocorking; 271extern int sysctl_tcp_autocorking;
@@ -567,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk);
567void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); 559void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
568void tcp_reset(struct sock *sk); 560void tcp_reset(struct sock *sk);
569void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); 561void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
562void tcp_fin(struct sock *sk);
570 563
571/* tcp_timer.c */ 564/* tcp_timer.c */
572void tcp_init_xmit_timers(struct sock *); 565void tcp_init_xmit_timers(struct sock *);
@@ -962,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
962 */ 955 */
963static inline void tcp_enable_early_retrans(struct tcp_sock *tp) 956static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
964{ 957{
958 struct net *net = sock_net((struct sock *)tp);
959
965 tp->do_early_retrans = sysctl_tcp_early_retrans && 960 tp->do_early_retrans = sysctl_tcp_early_retrans &&
966 sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && 961 sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
967 sysctl_tcp_reordering == 3; 962 net->ipv4.sysctl_tcp_reordering == 3;
968} 963}
969 964
970static inline void tcp_disable_early_retrans(struct tcp_sock *tp) 965static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
@@ -1251,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1251 1246
1252static inline int tcp_fin_time(const struct sock *sk) 1247static inline int tcp_fin_time(const struct sock *sk)
1253{ 1248{
1254 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout; 1249 int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1255 const int rto = inet_csk(sk)->icsk_rto; 1250 const int rto = inet_csk(sk)->icsk_rto;
1256 1251
1257 if (fin_timeout < (rto << 2) - (rto >> 1)) 1252 if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1433,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
1433 1428
1434extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; 1429extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1435int tcp_fastopen_reset_cipher(void *key, unsigned int len); 1430int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1431void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1436struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 1432struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1437 struct request_sock *req, 1433 struct request_sock *req,
1438 struct tcp_fastopen_cookie *foc, 1434 struct tcp_fastopen_cookie *foc,
@@ -1681,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1681 1677
1682static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) 1678static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1683{ 1679{
1684 return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat; 1680 struct net *net = sock_net((struct sock *)tp);
1681 return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1685} 1682}
1686 1683
1687static inline bool tcp_stream_memory_free(const struct sock *sk) 1684static inline bool tcp_stream_memory_free(const struct sock *sk)
@@ -1815,4 +1812,38 @@ static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1815 skb->truesize = 2; 1812 skb->truesize = 2;
1816} 1813}
1817 1814
1815static inline int tcp_inq(struct sock *sk)
1816{
1817 struct tcp_sock *tp = tcp_sk(sk);
1818 int answ;
1819
1820 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1821 answ = 0;
1822 } else if (sock_flag(sk, SOCK_URGINLINE) ||
1823 !tp->urg_data ||
1824 before(tp->urg_seq, tp->copied_seq) ||
1825 !before(tp->urg_seq, tp->rcv_nxt)) {
1826
1827 answ = tp->rcv_nxt - tp->copied_seq;
1828
1829 /* Subtract 1, if FIN was received */
1830 if (answ && sock_flag(sk, SOCK_DONE))
1831 answ--;
1832 } else {
1833 answ = tp->urg_seq - tp->copied_seq;
1834 }
1835
1836 return answ;
1837}
1838
1839static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1840{
1841 u16 segs_in;
1842
1843 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1844 tp->segs_in += segs_in;
1845 if (skb->len > tcp_hdrlen(skb))
1846 tp->data_segs_in += segs_in;
1847}
1848
1818#endif /* _TCP_H */ 1849#endif /* _TCP_H */