aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/tcp.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
commitf9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /include/net/tcp.h
parent0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parente5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov. 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J Bennieston. 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn Mork. 4) BPF now has a "random" opcode, from Chema Gonzalez. 5) Add more BPF documentation and improve test framework, from Daniel Borkmann. 6) Support TCP fastopen over ipv6, from Daniel Lee. 7) Add software TSO helper functions and use them to support software TSO in mvneta and mv643xx_eth drivers. From Ezequiel Garcia. 8) Support software TSO in fec driver too, from Nimrod Andy. 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli. 10) Handle broadcasts more gracefully over macvlan when there are large numbers of interfaces configured, from Herbert Xu. 11) Allow more control over fwmark used for non-socket based responses, from Lorenzo Colitti. 12) Do TCP congestion window limiting based upon measurements, from Neal Cardwell. 13) Support busy polling in SCTP, from Neil Horman. 14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru. 15) Bridge promisc mode handling improvements from Vlad Yasevich. 16) Don't use inetpeer entries to implement ID generation any more, it performs poorly, from Eric Dumazet. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits) rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 tcp: fixing TLP's FIN recovery net: fec: Add software TSO support net: fec: Add Scatter/gather support net: fec: Increase buffer descriptor entry number net: fec: Factorize feature setting net: fec: Enable IP header hardware checksum net: fec: Factorize the .xmit transmit function bridge: fix compile error when compiling without IPv6 support bridge: fix smatch warning / potential null pointer dereference via-rhine: fix full-duplex with autoneg disable bnx2x: Enlarge the dorq threshold for VFs bnx2x: Check for UNDI in uncommon branch bnx2x: Fix 1G-baseT link bnx2x: Fix link for KR with swapped polarity lane sctp: Fix sk_ack_backlog wrap-around problem net/core: Add VF link state control policy net/fsl: xgmac_mdio is dependent on OF_MDIO net/fsl: Make xgmac_mdio read error message useful net_sched: drr: warn when qdisc is not work conserving ...
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--include/net/tcp.h47
1 files changed, 34 insertions, 13 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 87d877408188..7286db80e8b8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -220,8 +220,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
220#define TFO_SERVER_ENABLE 2 220#define TFO_SERVER_ENABLE 2
221#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ 221#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
222 222
223/* Process SYN data but skip cookie validation */
224#define TFO_SERVER_COOKIE_NOT_CHKED 0x100
225/* Accept SYN data w/o any cookie option */ 223/* Accept SYN data w/o any cookie option */
226#define TFO_SERVER_COOKIE_NOT_REQD 0x200 224#define TFO_SERVER_COOKIE_NOT_REQD 0x200
227 225
@@ -230,10 +228,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
230 */ 228 */
231#define TFO_SERVER_WO_SOCKOPT1 0x400 229#define TFO_SERVER_WO_SOCKOPT1 0x400
232#define TFO_SERVER_WO_SOCKOPT2 0x800 230#define TFO_SERVER_WO_SOCKOPT2 0x800
233/* Always create TFO child sockets on a TFO listener even when
234 * cookie/data not present. (For testing purpose!)
235 */
236#define TFO_SERVER_ALWAYS 0x1000
237 231
238extern struct inet_timewait_death_row tcp_death_row; 232extern struct inet_timewait_death_row tcp_death_row;
239 233
@@ -541,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
541void tcp_xmit_retransmit_queue(struct sock *); 535void tcp_xmit_retransmit_queue(struct sock *);
542void tcp_simple_retransmit(struct sock *); 536void tcp_simple_retransmit(struct sock *);
543int tcp_trim_head(struct sock *, struct sk_buff *, u32); 537int tcp_trim_head(struct sock *, struct sk_buff *, u32);
544int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); 538int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
545 539
546void tcp_send_probe0(struct sock *); 540void tcp_send_probe0(struct sock *);
547void tcp_send_partial(struct sock *); 541void tcp_send_partial(struct sock *);
@@ -558,7 +552,6 @@ void tcp_send_loss_probe(struct sock *sk);
558bool tcp_schedule_loss_probe(struct sock *sk); 552bool tcp_schedule_loss_probe(struct sock *sk);
559 553
560/* tcp_input.c */ 554/* tcp_input.c */
561void tcp_cwnd_application_limited(struct sock *sk);
562void tcp_resume_early_retransmit(struct sock *sk); 555void tcp_resume_early_retransmit(struct sock *sk);
563void tcp_rearm_rto(struct sock *sk); 556void tcp_rearm_rto(struct sock *sk);
564void tcp_reset(struct sock *sk); 557void tcp_reset(struct sock *sk);
@@ -797,7 +790,7 @@ struct tcp_congestion_ops {
797 /* return slow start threshold (required) */ 790 /* return slow start threshold (required) */
798 u32 (*ssthresh)(struct sock *sk); 791 u32 (*ssthresh)(struct sock *sk);
799 /* do new cwnd calculation (required) */ 792 /* do new cwnd calculation (required) */
800 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 793 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
801 /* call before changing ca_state (optional) */ 794 /* call before changing ca_state (optional) */
802 void (*set_state)(struct sock *sk, u8 new_state); 795 void (*set_state)(struct sock *sk, u8 new_state);
803 /* call when cwnd event occurs (optional) */ 796 /* call when cwnd event occurs (optional) */
@@ -829,7 +822,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
829 822
830extern struct tcp_congestion_ops tcp_init_congestion_ops; 823extern struct tcp_congestion_ops tcp_init_congestion_ops;
831u32 tcp_reno_ssthresh(struct sock *sk); 824u32 tcp_reno_ssthresh(struct sock *sk);
832void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 825void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
833extern struct tcp_congestion_ops tcp_reno; 826extern struct tcp_congestion_ops tcp_reno;
834 827
835static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) 828static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -975,7 +968,30 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
975{ 968{
976 return tp->snd_una + tp->snd_wnd; 969 return tp->snd_una + tp->snd_wnd;
977} 970}
978bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); 971
972/* We follow the spirit of RFC2861 to validate cwnd but implement a more
973 * flexible approach. The RFC suggests cwnd should not be raised unless
974 * it was fully used previously. And that's exactly what we do in
975 * congestion avoidance mode. But in slow start we allow cwnd to grow
976 * as long as the application has used half the cwnd.
977 * Example :
978 * cwnd is 10 (IW10), but application sends 9 frames.
979 * We allow cwnd to reach 18 when all frames are ACKed.
980 * This check is safe because it's as aggressive as slow start which already
981 * risks 100% overshoot. The advantage is that we discourage application to
982 * either send more filler packets or data to artificially blow up the cwnd
983 * usage, and allow application-limited process to probe bw more aggressively.
984 */
985static inline bool tcp_is_cwnd_limited(const struct sock *sk)
986{
987 const struct tcp_sock *tp = tcp_sk(sk);
988
989 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
990 if (tp->snd_cwnd <= tp->snd_ssthresh)
991 return tp->snd_cwnd < 2 * tp->max_packets_out;
992
993 return tp->is_cwnd_limited;
994}
979 995
980static inline void tcp_check_probe_timer(struct sock *sk) 996static inline void tcp_check_probe_timer(struct sock *sk)
981{ 997{
@@ -1103,6 +1119,9 @@ static inline void tcp_openreq_init(struct request_sock *req,
1103 ireq->ir_num = ntohs(tcp_hdr(skb)->dest); 1119 ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
1104} 1120}
1105 1121
1122extern void tcp_openreq_init_rwin(struct request_sock *req,
1123 struct sock *sk, struct dst_entry *dst);
1124
1106void tcp_enter_memory_pressure(struct sock *sk); 1125void tcp_enter_memory_pressure(struct sock *sk);
1107 1126
1108static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1127static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1312,8 +1331,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
1312 1331
1313extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; 1332extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1314int tcp_fastopen_reset_cipher(void *key, unsigned int len); 1333int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1315void tcp_fastopen_cookie_gen(__be32 src, __be32 dst, 1334bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1316 struct tcp_fastopen_cookie *foc); 1335 struct request_sock *req,
1336 struct tcp_fastopen_cookie *foc,
1337 struct dst_entry *dst);
1317void tcp_fastopen_init_key_once(bool publish); 1338void tcp_fastopen_init_key_once(bool publish);
1318#define TCP_FASTOPEN_KEY_LENGTH 16 1339#define TCP_FASTOPEN_KEY_LENGTH 16
1319 1340