Diffstat (limited to 'net/ipv4/tcp_ipv4.c')

 net/ipv4/tcp_ipv4.c | 74
 1 file changed, 45 insertions(+), 29 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 708dc203b03..6cdf6a28f6b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
 #include <net/netdma.h>
+#include <net/secure_seq.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -429,8 +430,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 			break;
 
 		icsk->icsk_backoff--;
-		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
-					 icsk->icsk_backoff;
+		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
+			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
 		tcp_bound_rto(sk);
 
 		skb = tcp_write_queue_head(sk);
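
In the hunk above, the ICMP error handler rewinds one step of exponential backoff and recomputes the retransmission timer; the change makes it fall back to TCP_TIMEOUT_INIT when the connection has no smoothed-RTT sample yet (tp->srtt == 0), rather than shifting a value derived from an empty estimate. A minimal userspace sketch of the arithmetic, assuming HZ=1000 and a 1*HZ initial timeout (older kernels used 3*HZ); backed_off_rto() is a made-up helper, not a kernel function:

    #include <stdio.h>

    #define HZ               1000        /* assumed tick rate */
    #define TCP_TIMEOUT_INIT (1 * HZ)    /* 3 * HZ on older kernels */

    static unsigned int backed_off_rto(unsigned int srtt_based_rto,
                                       unsigned int backoff)
    {
        /* No RTT sample yet: shifting a bogus base would arm a bogus
         * timer, so rebase on the initial timeout instead. */
        unsigned int base = srtt_based_rto ? srtt_based_rto
                                           : TCP_TIMEOUT_INIT;
        return base << backoff;          /* exponential backoff */
    }

    int main(void)
    {
        unsigned int b;

        for (b = 0; b <= 3; b++)         /* the srtt == 0 case */
            printf("backoff %u -> RTO %u jiffies\n",
                   b, backed_off_rto(0, b));
        return 0;
    }
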
@@ -629,7 +630,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.iov[0].iov_len = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
 	if (key) {
 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 				   (TCPOPT_NOP << 16) |
@@ -807,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-		msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
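
Since the rewritten helper above warns at most once per listening socket, ongoing flood pressure is meant to be read from the two new MIB counters instead. On kernels carrying this change they surface in the TcpExt rows of /proc/net/netstat (as TCPReqQFullDoCookies and TCPReqQFullDrop, matching the LINUX_MIB_* names in the hunk). A small observational sketch; the file layout assumed here is the usual procfs pairing of a names row and a values row:

    #include <stdio.h>
    #include <string.h>

    /* Dump the TcpExt name/value rows of /proc/net/netstat. The
     * TCPReqQFullDoCookies / TCPReqQFullDrop columns added alongside
     * tcp_syn_flood_action() appear there on kernels with this change. */
    int main(void)
    {
        FILE *f = fopen("/proc/net/netstat", "r");
        char line[4096];

        if (!f) {
            perror("/proc/net/netstat");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "TcpExt:", 7))
                fputs(line, stdout);     /* names row, then values row */
        fclose(f);
        return 0;
    }
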
@@ -908,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 		}
 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 	}
-	if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+	md5sig = tp->md5sig_info;
+	if (md5sig->entries4 == 0 &&
+	    tcp_alloc_md5sig_pool(sk) == NULL) {
 		kfree(newkey);
 		return -ENOMEM;
 	}
-	md5sig = tp->md5sig_info;
 
 	if (md5sig->alloced4 == md5sig->entries4) {
 		keys = kmalloc((sizeof(*keys) *
 				(md5sig->entries4 + 1)), GFP_ATOMIC);
 		if (!keys) {
 			kfree(newkey);
-			tcp_free_md5sig_pool();
+			if (md5sig->entries4 == 0)
+				tcp_free_md5sig_pool();
 			return -ENOMEM;
 		}
 
@@ -963,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 				kfree(tp->md5sig_info->keys4);
 				tp->md5sig_info->keys4 = NULL;
 				tp->md5sig_info->alloced4 = 0;
+				tcp_free_md5sig_pool();
 			} else if (tp->md5sig_info->entries4 != i) {
 				/* Need to do some manipulation */
 				memmove(&tp->md5sig_info->keys4[i],
@@ -970,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 					(tp->md5sig_info->entries4 - i) *
 					sizeof(struct tcp4_md5sig_key));
 			}
-			tcp_free_md5sig_pool();
 			return 0;
 		}
 	}
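
The md5sig hunks above rebalance the lifetime of the shared MD5 signature pool: tcp_v4_md5_do_add() now allocates it only when a socket installs its first IPv4 key (entries4 == 0), and tcp_v4_md5_do_del() releases it only when the last key is removed, instead of pairing an allocation or release with every individual add and delete. A refcount-style model of that invariant; every name below is a stand-in, not a kernel symbol:

    #include <assert.h>

    /* Model: the global pool is held once per socket that has at
     * least one key, never once per key. */
    static int pool_refs;                 /* stand-in pool refcount */

    static void pool_get(void) { pool_refs++; }
    static void pool_put(void) { assert(pool_refs > 0); pool_refs--; }

    struct md5_state { int entries4; };

    static void key_add(struct md5_state *s)
    {
        if (s->entries4 == 0)             /* first key takes the pool */
            pool_get();
        s->entries4++;
    }

    static void key_del(struct md5_state *s)
    {
        assert(s->entries4 > 0);
        if (--s->entries4 == 0)           /* last key drops the pool */
            pool_put();
    }
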
@@ -1234,11 +1256,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1249,14 +1267,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-		goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1302,9 +1315,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 			want_cookie = 0;	/* not our kind of cookie */
-#endif
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
@@ -1384,6 +1395,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v4_init_sequence(skb);
 	}
 	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
 			       (struct request_values *)&tmp_ext) ||
@@ -1458,6 +1470,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
 	tcp_initialize_rcv_mss(newsk);
+	if (tcp_rsk(req)->snt_synack)
+		tcp_valid_rtt_meas(newsk,
+		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
+	newtp->total_retrans = req->retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
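
Together with the tcp_rsk(req)->snt_synack stamp added in tcp_v4_conn_request() above, this hunk gives passively opened connections an initial RTT sample: the listener records tcp_time_stamp when the SYN-ACK goes out, and the child socket created on the final ACK feeds the elapsed jiffies into tcp_valid_rtt_meas(). A worked sketch of that arithmetic, assuming HZ=1000 so one jiffy is one millisecond:

    #include <stdio.h>

    #define HZ 1000                       /* assumed: 1 jiffy = 1 ms */

    int main(void)
    {
        /* Stamp at SYN-ACK transmit, sample at the final ACK; the
         * values are illustrative, not taken from a real trace. */
        unsigned long snt_synack = 500012;  /* jiffies at SYN-ACK TX */
        unsigned long now        = 500054;  /* jiffies at final ACK  */
        unsigned long rtt        = now - snt_synack;

        /* Unsigned subtraction stays correct across jiffies wraparound. */
        printf("initial RTT sample: %lu jiffies (%lu ms at HZ=%d)\n",
               rtt, rtt * 1000 / HZ, HZ);
        return 0;
    }
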
@@ -1855,7 +1871,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them. -DaveM
 	 */
-	tp->snd_cwnd = 2;
+	tp->snd_cwnd = TCP_INIT_CWND;
 
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
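
The last hunk replaces the magic initial congestion window of 2 with the named TCP_INIT_CWND, which is 10 in kernels that adopted the larger initial window. A back-of-the-envelope sketch of what that allows in the first flight, assuming a typical 1460-byte Ethernet MSS:

    #include <stdio.h>

    #define TCP_INIT_CWND 10              /* per include/net/tcp.h here */

    int main(void)
    {
        unsigned int mss = 1460;          /* assumed typical MSS */

        /* Initial window: data that may be in flight before the
         * first ACK returns. */
        printf("IW = %u segments = %u bytes\n",
               TCP_INIT_CWND, TCP_INIT_CWND * mss);
        return 0;
    }
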