author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-01-09 02:38:23 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-01-09 02:38:23 -0500
commit     da733563be5a9da26fe81d9f007262d00b846e22 (patch)
tree       db28291df94a2043af2123911984c5c173da4e6f /net/ipv4/tcp_ipv4.c
parent     6ccbcf2cb41131f8d56ef0723bf3f7c1f8486076 (diff)
parent     dab78d7924598ea4031663dd10db814e2e324928 (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  141
1 files changed, 79 insertions, 62 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c12b8ec849d..a9db4b1a2215 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						    __be32 addr);
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th);
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
 #else
 static inline
 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -104,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					   ip_hdr(skb)->saddr,
@@ -552,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
 /* This routine computes an IPv4 TCP checksum. */
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 
 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 }
@@ -590,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -652,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 
 	net = dev_net(skb_dst(skb)->dev);
+	arg.tos = ip_hdr(skb)->tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);
 
@@ -666,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
-			    int reply_flags)
+			    int reply_flags, u8 tos)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -726,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	if (oif)
 		arg.bound_dev_if = oif;
-
+	arg.tos = tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);
 
@@ -743,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
-			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			tw->tw_tos
			);
 
 	inet_twsk_put(tw);
@@ -757,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
-			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			ip_hdr(skb)->tos);
 }
 
 /*
@@ -808,20 +811,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-		msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -909,18 +930,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
-		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+		md5sig = tp->md5sig_info;
+		if (md5sig->entries4 == 0 &&
+		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
-		md5sig = tp->md5sig_info;
 
		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
-				tcp_free_md5sig_pool();
+				if (md5sig->entries4 == 0)
+					tcp_free_md5sig_pool();
				return -ENOMEM;
			}
 
@@ -964,6 +988,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
+				tcp_free_md5sig_pool();
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
@@ -971,7 +996,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
					(tp->md5sig_info->entries4 - i) *
					sizeof(struct tcp4_md5sig_key));
			}
-			tcp_free_md5sig_pool();
			return 0;
		}
	}
@@ -1069,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 }
 
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th)
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
@@ -1101,12 +1125,12 @@ clear_hash_noput:
 }
 
 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			struct sock *sk, struct request_sock *req,
-			struct sk_buff *skb)
+			const struct sock *sk, const struct request_sock *req,
+			const struct sk_buff *skb)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 saddr, daddr;
 
 	if (sk) {
@@ -1151,7 +1175,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
	 * This gets called for each TCP segment that arrives
@@ -1161,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
-	__u8 *hash_location = NULL;
+	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	unsigned char newhash[16];
 
@@ -1227,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct request_sock *req;
 	struct inet_request_sock *ireq;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1235,11 +1259,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1270,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	 * evidently real one.
	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-		goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
			while (l-- > 0)
				*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
			want_cookie = 0; /* not our kind of cookie */
-#endif
			tmp_ext.cookie_out_never = 0; /* false */
			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
		} else if (!tp->rx_opt.cookie_in_always) {
@@ -1497,6 +1510,8 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	tcp_clear_xmit_timers(newsk);
+	bh_unlock_sock(newsk);
 	sock_put(newsk);
 	goto exit;
 }
@@ -1578,7 +1593,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
@@ -1595,7 +1610,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
		goto discard;
 
 	if (nsk != sk) {
-		sock_rps_save_rxhash(nsk, skb->rxhash);
+		sock_rps_save_rxhash(nsk, skb);
		if (tcp_child_process(sk, nsk, skb)) {
			rsk = nsk;
			goto reset;
@@ -1603,7 +1618,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
			return 0;
		}
	} else
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
@@ -1635,7 +1650,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
 int tcp_v4_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
-	struct tcphdr *th;
+	const struct tcphdr *th;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -1670,7 +1685,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
 	TCP_SKB_CB(skb)->when = 0;
-	TCP_SKB_CB(skb)->flags = iph->tos;
+	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1799,7 +1814,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
 
 void *tcp_v4_tw_get_peer(struct sock *sk)
 {
-	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct inet_timewait_sock *tw = inet_twsk(sk);
 
 	return inet_getpeer_v4(tw->tw_daddr, 1);
 }
@@ -2326,7 +2341,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
	}
 }
 
-static int tcp_seq_open(struct inode *inode, struct file *file)
+int tcp_seq_open(struct inode *inode, struct file *file)
 {
 	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
 	struct tcp_iter_state *s;
@@ -2342,23 +2357,19 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
 	s->last_pos = 0;
 	return 0;
 }
+EXPORT_SYMBOL(tcp_seq_open);
 
 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
 {
 	int rc = 0;
 	struct proc_dir_entry *p;
 
-	afinfo->seq_fops.open = tcp_seq_open;
-	afinfo->seq_fops.read = seq_read;
-	afinfo->seq_fops.llseek = seq_lseek;
-	afinfo->seq_fops.release = seq_release_net;
-
 	afinfo->seq_ops.start = tcp_seq_start;
 	afinfo->seq_ops.next = tcp_seq_next;
 	afinfo->seq_ops.stop = tcp_seq_stop;
 
 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
-			     &afinfo->seq_fops, afinfo);
+			     afinfo->seq_fops, afinfo);
 	if (!p)
		rc = -ENOMEM;
 	return rc;
@@ -2371,7 +2382,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
-static void get_openreq4(struct sock *sk, struct request_sock *req,
+static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2401,9 +2412,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 {
 	int timer_active;
 	unsigned long timer_expires;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -2452,7 +2463,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
		   len);
 }
 
-static void get_timewait4_sock(struct inet_timewait_sock *tw,
+static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
 {
 	__be32 dest, src;
@@ -2507,12 +2518,18 @@ out:
 	return 0;
 }
 
+static const struct file_operations tcp_afinfo_seq_fops = {
+	.owner = THIS_MODULE,
+	.open = tcp_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_net
+};
+
 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 	.name = "tcp",
 	.family = AF_INET,
-	.seq_fops = {
-		.owner = THIS_MODULE,
-	},
+	.seq_fops = &tcp_afinfo_seq_fops,
 	.seq_ops = {
		.show = tcp4_seq_show,
	},