Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c23756965a..97e48d60c4e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5877,3 +5877,151 @@ discard:
 	return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	if (family == AF_INET)
+		LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+			       &ireq->ir_rmt_addr, port);
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (family == AF_INET6)
+		LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+			       &ireq->ir_v6_rmt_addr, port);
+#endif
+}
+
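+/* Family-independent handling of an incoming SYN on a listening socket.
+ * Every address-family specific step (routing the request, generating
+ * the ISN or syncookie, sending the SYN-ACK, queueing the request) is
+ * delegated through @af_ops, so the IPv4 and IPv6 paths can share this
+ * function.
+ */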
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+		     const struct tcp_request_sock_ops *af_ops,
+		     struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_options_received tmp_opt;
+	struct request_sock *req;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = NULL;
+	__u32 isn = TCP_SKB_CB(skb)->when;
+	bool want_cookie = false, fastopen;
+	struct flowi fl;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
+	int err;
+
+	/* TW buckets are converted to open requests without
+	 * limitation; they conserve resources and the peer is
+	 * evidently a real one.
+	 */
+	if ((sysctl_tcp_syncookies == 2 ||
+	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+		want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+		if (!want_cookie)
+			goto drop;
+	}
+
+	/* The accept backlog is full. If we have already queued enough
+	 * warm entries in the SYN queue, drop this request; that is
+	 * better than clogging the SYN queue with open requests whose
+	 * timeouts grow exponentially.
+	 */
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		goto drop;
+	}
+
+	req = inet_reqsk_alloc(rsk_ops);
+	if (!req)
+		goto drop;
+
+	tcp_rsk(req)->af_specific = af_ops;
+
+	tcp_clear_options(&tmp_opt);
+	tmp_opt.mss_clamp = af_ops->mss_clamp;
+	tmp_opt.user_mss = tp->rx_opt.user_mss;
+	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
+	if (want_cookie && !tmp_opt.saw_tstamp)
+		tcp_clear_options(&tmp_opt);
+
+	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+	tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+	af_ops->init_req(req, sk, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_free;
+
+	if (!want_cookie || tmp_opt.tstamp_ok)
+		TCP_ECN_create_request(req, skb, sock_net(sk));
+
+	if (want_cookie) {
+		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+		req->cookie_ts = tmp_opt.tstamp_ok;
+	} else if (!isn) {
+		/* VJ's idea. We save the last timestamp seen from
+		 * the destination in the peer table when entering
+		 * TIME-WAIT state, and check against it before
+		 * accepting a new connection request.
+		 *
+		 * If "isn" is not zero, this request hit an alive
+		 * TIME-WAIT bucket, so all the necessary checks
+		 * were already made while processing the TIME-WAIT
+		 * state.
+		 */
+		if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+			bool strict;
+
+			dst = af_ops->route_req(sk, &fl, req, &strict);
+			if (dst && strict &&
+			    !tcp_peer_is_proven(req, dst, true)) {
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+				goto drop_and_release;
+			}
+		}
+		/* Kill the following clause, if you dislike this way. */
+		else if (!sysctl_tcp_syncookies &&
+			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+			  (sysctl_max_syn_backlog >> 2)) &&
+			 !tcp_peer_is_proven(req, dst, false)) {
+			/* Without syncookies, the last quarter of the
+			 * backlog is reserved for destinations proven
+			 * to be alive: we keep communicating with
+			 * destinations we already knew about when the
+			 * SYN flood began.
+			 */
+			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+				    rsk_ops->family);
+			goto drop_and_release;
+		}
+
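+		/* Neither a syncookie nor a recycled TIME-WAIT ISN:
+		 * have the address family generate a fresh initial
+		 * sequence number for this request.
+		 */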
+		isn = af_ops->init_seq(skb);
+	}
+	if (!dst) {
+		dst = af_ops->route_req(sk, &fl, req, NULL);
+		if (!dst)
+			goto drop_and_free;
+	}
+
+	tcp_rsk(req)->snt_isn = isn;
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = af_ops->send_synack(sk, dst, &fl, req,
+				  skb_get_queue_mapping(skb), &foc);
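+	/* If tcp_try_fastopen() created a child socket, the request is
+	 * already on the accept queue; otherwise it must wait in the
+	 * SYN queue for the completing ACK, unless we are sending a
+	 * syncookie and keep no state at all.
+	 */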
+	if (!fastopen) {
+		if (err || want_cookie)
+			goto drop_and_free;
+
+		tcp_rsk(req)->listener = NULL;
+		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	}
+
+	return 0;
+
+drop_and_release:
+	dst_release(dst);
+drop_and_free:
+	reqsk_free(req);
+drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
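For context, here is a minimal sketch of how an address family might plug into this helper. The ops-table field names (.mss_clamp, .init_req, .route_req, .init_seq, .send_synack, .queue_hash_add) match the callbacks invoked above; the IPv4 names (tcp_request_sock_ipv4_ops, tcp_v4_init_req, tcp_v4_route_req) follow the series this patch belongs to but are not part of this diff, so treat them as illustrative assumptions rather than the exact upstream caller.

/* Sketch of an IPv4 wiring for tcp_conn_request(); names other than
 * tcp_request_sock_ops, tcp_v4_init_sequence, tcp_v4_send_synack and
 * inet_csk_reqsk_queue_hash_add are assumed from the surrounding series.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	= TCP_MSS_DEFAULT,
	.init_req	= tcp_v4_init_req,	/* pull addresses/options from the SYN */
	.route_req	= tcp_v4_route_req,	/* route lookup for the SYN-ACK */
	.init_seq	= tcp_v4_init_sequence,	/* fresh ISN for this flow */
	.send_synack	= tcp_v4_send_synack,
	.queue_hash_add	= inet_csk_reqsk_queue_hash_add,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq = cookie_v4_init_sequence,
#endif
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses. */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}

With this split, the per-family handler shrinks to family-specific sanity checks plus a single call; everything shared between IPv4 and IPv6 lives in tcp_conn_request().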