Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  142
1 file changed, 80 insertions, 62 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 955b8e65b69e..a9db4b1a2215 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
 #include <net/netdma.h>
+#include <net/secure_seq.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -91,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th);
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
 #else
 static inline
 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -103,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 					  ip_hdr(skb)->saddr,
@@ -551,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
 /* This routine computes an IPv4 TCP checksum. */
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 
 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
 }
@@ -589,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -651,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 
 	net = dev_net(skb_dst(skb)->dev);
+	arg.tos = ip_hdr(skb)->tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 		      &arg, arg.iov[0].iov_len);
 
@@ -665,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts, int oif,
 			    struct tcp_md5sig_key *key,
-			    int reply_flags)
+			    int reply_flags, u8 tos)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -725,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	if (oif)
 		arg.bound_dev_if = oif;
-
+	arg.tos = tos;
 	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
 		      &arg, arg.iov[0].iov_len);
 
@@ -742,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
-			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			tw->tw_tos
 			);
 
 	inet_twsk_put(tw);
@@ -756,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 			req->ts_recent,
 			0,
 			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
-			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+			ip_hdr(skb)->tos);
 }
 
 /*
@@ -807,20 +811,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-		msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -908,18 +930,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 			}
 			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 		}
-		if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+		md5sig = tp->md5sig_info;
+		if (md5sig->entries4 == 0 &&
+		    tcp_alloc_md5sig_pool(sk) == NULL) {
 			kfree(newkey);
 			return -ENOMEM;
 		}
-		md5sig = tp->md5sig_info;
 
 		if (md5sig->alloced4 == md5sig->entries4) {
 			keys = kmalloc((sizeof(*keys) *
 				       (md5sig->entries4 + 1)), GFP_ATOMIC);
 			if (!keys) {
 				kfree(newkey);
-				tcp_free_md5sig_pool();
+				if (md5sig->entries4 == 0)
+					tcp_free_md5sig_pool();
 				return -ENOMEM;
 			}
 
@@ -963,6 +988,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 				kfree(tp->md5sig_info->keys4);
 				tp->md5sig_info->keys4 = NULL;
 				tp->md5sig_info->alloced4 = 0;
+				tcp_free_md5sig_pool();
 			} else if (tp->md5sig_info->entries4 != i) {
 				/* Need to do some manipulation */
 				memmove(&tp->md5sig_info->keys4[i],
@@ -970,7 +996,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 					(tp->md5sig_info->entries4 - i) *
 					sizeof(struct tcp4_md5sig_key));
 			}
-			tcp_free_md5sig_pool();
 			return 0;
 		}
 	}
@@ -1068,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 }
 
 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       __be32 daddr, __be32 saddr, struct tcphdr *th)
+			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
@@ -1100,12 +1125,12 @@ clear_hash_noput:
 }
 
 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			struct sock *sk, struct request_sock *req,
-			struct sk_buff *skb)
+			const struct sock *sk, const struct request_sock *req,
+			const struct sk_buff *skb)
 {
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 saddr, daddr;
 
 	if (sk) {
@@ -1150,7 +1175,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1160,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	 * o MD5 hash and we're not expecting one.
 	 * o MD5 hash and its wrong.
 	 */
-	__u8 *hash_location = NULL;
+	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	unsigned char newhash[16];
 
@@ -1226,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
 	struct tcp_options_received tmp_opt;
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct request_sock *req;
 	struct inet_request_sock *ireq;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1234,11 +1259,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1249,14 +1270,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-			goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1302,9 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 			want_cookie = 0;	/* not our kind of cookie */
-#endif
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
@@ -1496,6 +1510,8 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	tcp_clear_xmit_timers(newsk);
+	bh_unlock_sock(newsk);
 	sock_put(newsk);
 	goto exit;
 }
@@ -1577,7 +1593,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1594,7 +1610,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			goto discard;
 
 		if (nsk != sk) {
-			sock_rps_save_rxhash(nsk, skb->rxhash);
+			sock_rps_save_rxhash(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb)) {
 				rsk = nsk;
 				goto reset;
@@ -1602,7 +1618,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			return 0;
 		}
 	} else
-		sock_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb);
 
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
@@ -1634,7 +1650,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
 int tcp_v4_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
-	struct tcphdr *th;
+	const struct tcphdr *th;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -1669,7 +1685,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
 	TCP_SKB_CB(skb)->when	 = 0;
-	TCP_SKB_CB(skb)->flags	 = iph->tos;
+	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
 	TCP_SKB_CB(skb)->sacked	 = 0;
 
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1798,7 +1814,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
 
 void *tcp_v4_tw_get_peer(struct sock *sk)
 {
-	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct inet_timewait_sock *tw = inet_twsk(sk);
 
 	return inet_getpeer_v4(tw->tw_daddr, 1);
 }
@@ -2325,7 +2341,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	}
 }
 
-static int tcp_seq_open(struct inode *inode, struct file *file)
+int tcp_seq_open(struct inode *inode, struct file *file)
 {
 	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
 	struct tcp_iter_state *s;
@@ -2341,23 +2357,19 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
 	s->last_pos = 0;
 	return 0;
 }
+EXPORT_SYMBOL(tcp_seq_open);
 
 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
 {
 	int rc = 0;
 	struct proc_dir_entry *p;
 
-	afinfo->seq_fops.open		= tcp_seq_open;
-	afinfo->seq_fops.read		= seq_read;
-	afinfo->seq_fops.llseek		= seq_lseek;
-	afinfo->seq_fops.release	= seq_release_net;
-
 	afinfo->seq_ops.start		= tcp_seq_start;
 	afinfo->seq_ops.next		= tcp_seq_next;
 	afinfo->seq_ops.stop		= tcp_seq_stop;
 
 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
-			     &afinfo->seq_fops, afinfo);
+			     afinfo->seq_fops, afinfo);
 	if (!p)
 		rc = -ENOMEM;
 	return rc;
@@ -2370,7 +2382,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
-static void get_openreq4(struct sock *sk, struct request_sock *req,
+static void get_openreq4(const struct sock *sk, const struct request_sock *req,
 			 struct seq_file *f, int i, int uid, int *len)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2400,9 +2412,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 {
 	int timer_active;
 	unsigned long timer_expires;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -2451,7 +2463,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		       len);
 }
 
-static void get_timewait4_sock(struct inet_timewait_sock *tw,
+static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 			       struct seq_file *f, int i, int *len)
 {
 	__be32 dest, src;
@@ -2506,12 +2518,18 @@ out:
 	return 0;
 }
 
+static const struct file_operations tcp_afinfo_seq_fops = {
+	.owner   = THIS_MODULE,
+	.open    = tcp_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_net
+};
+
 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 	.name		= "tcp",
 	.family		= AF_INET,
-	.seq_fops	= {
-		.owner		= THIS_MODULE,
-	},
+	.seq_fops	= &tcp_afinfo_seq_fops,
 	.seq_ops	= {
 		.show		= tcp4_seq_show,
 	},