Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 -rw-r--r--  net/ipv4/tcp_ipv4.c | 158
 1 file changed, 85 insertions(+), 73 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e7e91e60ac74..2cd41265d17f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -104,7 +104,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
 
-static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
+static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
         const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
         struct sock *sk2;
@@ -113,7 +113,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
 
         sk_for_each_bound(sk2, node, &tb->owners) {
                 if (sk != sk2 &&
-                    !tcp_v6_ipv6only(sk2) &&
+                    !inet_v6_ipv6only(sk2) &&
                     (!sk->sk_bound_dev_if ||
                      !sk2->sk_bound_dev_if ||
                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
@@ -132,7 +132,8 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
 /* Obtain a reference to a local port for the given sock,
  * if snum is zero it means select any available local port.
  */
-static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
+int inet_csk_get_port(struct inet_hashinfo *hashinfo,
+                      struct sock *sk, unsigned short snum)
 {
         struct inet_bind_hashbucket *head;
         struct hlist_node *node;
@@ -146,16 +147,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
         int remaining = (high - low) + 1;
         int rover;
 
-        spin_lock(&tcp_hashinfo.portalloc_lock);
-        if (tcp_hashinfo.port_rover < low)
+        spin_lock(&hashinfo->portalloc_lock);
+        if (hashinfo->port_rover < low)
                 rover = low;
         else
-                rover = tcp_hashinfo.port_rover;
+                rover = hashinfo->port_rover;
         do {
                 rover++;
                 if (rover > high)
                         rover = low;
-                head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
+                head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
                 spin_lock(&head->lock);
                 inet_bind_bucket_for_each(tb, node, &head->chain)
                         if (tb->port == rover)
@@ -164,8 +165,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
         next:
                 spin_unlock(&head->lock);
         } while (--remaining > 0);
-        tcp_hashinfo.port_rover = rover;
-        spin_unlock(&tcp_hashinfo.portalloc_lock);
+        hashinfo->port_rover = rover;
+        spin_unlock(&hashinfo->portalloc_lock);
 
         /* Exhausted local port range during search? It is not
          * possible for us to be holding one of the bind hash
@@ -182,7 +183,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
                  */
                 snum = rover;
         } else {
-                head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+                head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
                 spin_lock(&head->lock);
                 inet_bind_bucket_for_each(tb, node, &head->chain)
                         if (tb->port == snum)
@@ -199,13 +200,13 @@ tb_found:
                         goto success;
                 } else {
                         ret = 1;
-                        if (tcp_bind_conflict(sk, tb))
+                        if (inet_csk_bind_conflict(sk, tb))
                                 goto fail_unlock;
                 }
         }
 tb_not_found:
         ret = 1;
-        if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
+        if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
                 goto fail_unlock;
         if (hlist_empty(&tb->owners)) {
                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -216,9 +217,9 @@ tb_not_found:
                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                 tb->fastreuse = 0;
 success:
-        if (!inet_sk(sk)->bind_hash)
+        if (!inet_csk(sk)->icsk_bind_hash)
                 inet_bind_hash(sk, tb, snum);
-        BUG_TRAP(inet_sk(sk)->bind_hash == tb);
+        BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
         ret = 0;
 
 fail_unlock:
@@ -228,6 +229,11 @@ fail:
         return ret;
 }
 
+static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
+{
+        return inet_csk_get_port(&tcp_hashinfo, sk, snum);
+}
+
 static void tcp_v4_hash(struct sock *sk)
 {
         inet_hash(&tcp_hashinfo, sk);
@@ -426,7 +432,7 @@ ok:
         }
 
         head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-        tb = inet_sk(sk)->bind_hash;
+        tb = inet_csk(sk)->icsk_bind_hash;
         spin_lock_bh(&head->lock);
         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                 __inet_hash(&tcp_hashinfo, sk, 0);
@@ -557,25 +563,28 @@ failure:
         return err;
 }
 
-static __inline__ int tcp_v4_iif(struct sk_buff *skb)
+static inline int inet_iif(const struct sk_buff *skb)
 {
         return ((struct rtable *)skb->dst)->rt_iif;
 }
 
-static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
+static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
+                                 const u32 rnd, const u16 synq_hsize)
 {
-        return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
+        return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
 }
 
-static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
-                                              struct request_sock ***prevp,
-                                              __u16 rport,
-                                              __u32 raddr, __u32 laddr)
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+                                         struct request_sock ***prevp,
+                                         const __u16 rport, const __u32 raddr,
+                                         const __u32 laddr)
 {
-        struct listen_sock *lopt = tp->accept_queue.listen_opt;
+        const struct inet_connection_sock *icsk = inet_csk(sk);
+        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
         struct request_sock *req, **prev;
 
-        for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
+        for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
+                                                    lopt->nr_table_entries)];
              (req = *prev) != NULL;
              prev = &req->dl_next) {
                 const struct inet_request_sock *ireq = inet_rsk(req);
@@ -583,7 +592,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
                 if (ireq->rmt_port == rport &&
                     ireq->rmt_addr == raddr &&
                     ireq->loc_addr == laddr &&
-                    TCP_INET_FAMILY(req->rsk_ops->family)) {
+                    AF_INET_FAMILY(req->rsk_ops->family)) {
                         BUG_TRAP(!req->sk);
                         *prevp = prev;
                         break;
@@ -595,12 +604,13 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
 
 static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct listen_sock *lopt = tp->accept_queue.listen_opt;
-        u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+        struct inet_connection_sock *icsk = inet_csk(sk);
+        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+        const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+                                     lopt->hash_rnd, lopt->nr_table_entries);
 
-        reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-        tcp_synq_added(sk);
+        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+        inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
 
 
@@ -687,7 +697,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
         }
 
         sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
-                         th->source, tcp_v4_iif(skb));
+                         th->source, inet_iif(skb));
         if (!sk) {
                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                 return;
@@ -747,8 +757,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
         if (sock_owned_by_user(sk))
                 goto out;
 
-        req = tcp_v4_search_req(tp, &prev, th->dest,
-                                iph->daddr, iph->saddr);
+        req = inet_csk_search_req(sk, &prev, th->dest,
+                                  iph->daddr, iph->saddr);
         if (!req)
                 goto out;
 
@@ -768,7 +778,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                  * created socket, and POSIX does not want network
                  * errors returned from accept().
                  */
-                tcp_synq_drop(sk, req, prev);
+                inet_csk_reqsk_queue_drop(sk, req, prev);
                 goto out;
 
         case TCP_SYN_SENT:
@@ -953,8 +963,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
                               req->ts_recent);
 }
 
-static struct dst_entry* tcp_v4_route_req(struct sock *sk,
-                                          struct request_sock *req)
+struct dst_entry* inet_csk_route_req(struct sock *sk,
+                                     const struct request_sock *req)
 {
         struct rtable *rt;
         const struct inet_request_sock *ireq = inet_rsk(req);
@@ -966,7 +976,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
                                                   ireq->rmt_addr),
                             .saddr = ireq->loc_addr,
                             .tos = RT_CONN_FLAGS(sk) } },
-                 .proto = IPPROTO_TCP,
+                 .proto = sk->sk_protocol,
                  .uli_u = { .ports =
                             { .sport = inet_sk(sk)->sport,
                               .dport = ireq->rmt_port } } };
@@ -996,7 +1006,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
         struct sk_buff * skb;
 
         /* First, grab a route. */
-        if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                 goto out;
 
         skb = tcp_make_synack(sk, dst, req);
@@ -1098,7 +1108,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
          * limitations, they conserve resources and peer is
          * evidently real one.
          */
-        if (tcp_synq_is_full(sk) && !isn) {
+        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 #ifdef CONFIG_SYN_COOKIES
                 if (sysctl_tcp_syncookies) {
                         want_cookie = 1;
@@ -1112,7 +1122,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
          * clogging syn queue with openreqs with exponentially increasing
          * timeout.
          */
-        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                 goto drop;
 
         req = reqsk_alloc(&tcp_request_sock_ops);
@@ -1169,7 +1179,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                  */
                 if (tmp_opt.saw_tstamp &&
                     sysctl_tcp_tw_recycle &&
-                    (dst = tcp_v4_route_req(sk, req)) != NULL &&
+                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                     peer->v4daddr == saddr) {
                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
@@ -1182,7 +1192,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                 }
                 /* Kill the following clause, if you dislike this way. */
                 else if (!sysctl_tcp_syncookies &&
-                         (sysctl_max_syn_backlog - tcp_synq_len(sk) <
+                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                           (sysctl_max_syn_backlog >> 2)) &&
                          (!peer || !peer->tcp_ts_stamp) &&
                          (!dst || !dst_metric(dst, RTAX_RTT))) {
@@ -1240,7 +1250,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         if (sk_acceptq_is_full(sk))
                 goto exit_overflow;
 
-        if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                 goto exit;
 
         newsk = tcp_create_openreq_child(sk, req, skb);
@@ -1257,7 +1267,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         newinet->saddr        = ireq->loc_addr;
         newinet->opt          = ireq->opt;
         ireq->opt             = NULL;
-        newinet->mc_index     = tcp_v4_iif(skb);
+        newinet->mc_index     = inet_iif(skb);
         newinet->mc_ttl       = skb->nh.iph->ttl;
         newtp->ext_header_len = 0;
         if (newinet->opt)
@@ -1285,18 +1295,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
         struct tcphdr *th = skb->h.th;
         struct iphdr *iph = skb->nh.iph;
-        struct tcp_sock *tp = tcp_sk(sk);
         struct sock *nsk;
         struct request_sock **prev;
         /* Find possible connection requests. */
-        struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
-                                                     iph->saddr, iph->daddr);
+        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
+                                                       iph->saddr, iph->daddr);
         if (req)
                 return tcp_check_req(sk, skb, req, prev);
 
         nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
                                         th->source, skb->nh.iph->daddr,
-                                        ntohs(th->dest), tcp_v4_iif(skb));
+                                        ntohs(th->dest), inet_iif(skb));
 
         if (nsk) {
                 if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1440,7 +1449,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
         sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
                            skb->nh.iph->daddr, ntohs(th->dest),
-                           tcp_v4_iif(skb));
+                           inet_iif(skb));
 
         if (!sk)
                 goto no_tcp_socket;
@@ -1507,7 +1516,7 @@ do_time_wait:
                 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
                                                         skb->nh.iph->daddr,
                                                         ntohs(th->dest),
-                                                        tcp_v4_iif(skb));
+                                                        inet_iif(skb));
                 if (sk2) {
                         tcp_tw_deschedule((struct inet_timewait_sock *)sk);
                         inet_twsk_put((struct inet_timewait_sock *)sk);
@@ -1619,7 +1628,7 @@ static int tcp_v4_init_sock(struct sock *sk)
         tcp_init_xmit_timers(sk);
         tcp_prequeue_init(tp);
 
-        tp->rto = TCP_TIMEOUT_INIT;
+        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
         tp->mdev = TCP_TIMEOUT_INIT;
 
         /* So many TCP implementations out there (incorrectly) count the
@@ -1672,7 +1681,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
         __skb_queue_purge(&tp->ucopy.prequeue);
 
         /* Clean up a referenced TCP bind bucket. */
-        if (inet_sk(sk)->bind_hash)
+        if (inet_csk(sk)->icsk_bind_hash)
                 inet_put_port(&tcp_hashinfo, sk);
 
         /*
@@ -1707,7 +1716,7 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
 
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-        struct tcp_sock *tp;
+        struct inet_connection_sock *icsk;
         struct hlist_node *node;
         struct sock *sk = cur;
         struct tcp_iter_state* st = seq->private;
@@ -1723,7 +1732,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
         if (st->state == TCP_SEQ_STATE_OPENREQ) {
                 struct request_sock *req = cur;
 
-                tp = tcp_sk(st->syn_wait_sk);
+                icsk = inet_csk(st->syn_wait_sk);
                 req = req->dl_next;
                 while (1) {
                         while (req) {
@@ -1736,17 +1745,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
                                 if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                         break;
 get_req:
-                                req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
+                                req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                         }
                         sk = sk_next(st->syn_wait_sk);
                         st->state = TCP_SEQ_STATE_LISTENING;
-                        read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 } else {
-                        tp = tcp_sk(sk);
-                        read_lock_bh(&tp->accept_queue.syn_wait_lock);
-                        if (reqsk_queue_len(&tp->accept_queue))
+                        icsk = inet_csk(sk);
+                        read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                        if (reqsk_queue_len(&icsk->icsk_accept_queue))
                                 goto start_req;
-                        read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                         sk = sk_next(sk);
                 }
 get_sk:
@@ -1755,9 +1764,9 @@ get_sk:
                         cur = sk;
                         goto out;
                 }
-                tp = tcp_sk(sk);
-                read_lock_bh(&tp->accept_queue.syn_wait_lock);
-                if (reqsk_queue_len(&tp->accept_queue)) {
+                icsk = inet_csk(sk);
+                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                         st->uid = sock_i_uid(sk);
                         st->syn_wait_sk = sk;
@@ -1765,7 +1774,7 @@ start_req:
                         st->sbucket = 0;
                         goto get_req;
                 }
-                read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
         }
         if (++st->bucket < INET_LHTABLE_SIZE) {
                 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
@@ -1951,8 +1960,8 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
         switch (st->state) {
         case TCP_SEQ_STATE_OPENREQ:
                 if (v) {
-                        struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                        read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
+                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 }
         case TCP_SEQ_STATE_LISTENING:
                 if (v != SEQ_START_TOKEN)
@@ -2058,18 +2067,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
         int timer_active;
         unsigned long timer_expires;
         struct tcp_sock *tp = tcp_sk(sp);
+        const struct inet_connection_sock *icsk = inet_csk(sp);
         struct inet_sock *inet = inet_sk(sp);
         unsigned int dest = inet->daddr;
         unsigned int src = inet->rcv_saddr;
         __u16 destp = ntohs(inet->dport);
         __u16 srcp = ntohs(inet->sport);
 
-        if (tp->pending == TCP_TIME_RETRANS) {
+        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                 timer_active = 1;
-                timer_expires = tp->timeout;
-        } else if (tp->pending == TCP_TIME_PROBE0) {
+                timer_expires = icsk->icsk_timeout;
+        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                 timer_active = 4;
-                timer_expires = tp->timeout;
+                timer_expires = icsk->icsk_timeout;
         } else if (timer_pending(&sp->sk_timer)) {
                 timer_active = 2;
                 timer_expires = sp->sk_timer.expires;
@@ -2084,12 +2094,14 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
                 timer_active,
                 jiffies_to_clock_t(timer_expires - jiffies),
-                tp->retransmits,
+                icsk->icsk_retransmits,
                 sock_i_uid(sp),
                 tp->probes_out,
                 sock_i_ino(sp),
                 atomic_read(&sp->sk_refcnt), sp,
-                tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
+                icsk->icsk_rto,
+                icsk->icsk_ack.ato,
+                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                 tp->snd_cwnd,
                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
@@ -2174,7 +2186,7 @@ struct proto tcp_prot = {
         .close          = tcp_close,
         .connect        = tcp_v4_connect,
         .disconnect     = tcp_disconnect,
-        .accept         = tcp_accept,
+        .accept         = inet_csk_accept,
         .ioctl          = tcp_ioctl,
         .init           = tcp_v4_init_sock,
         .destroy        = tcp_v4_destroy_sock,
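
The recurring pattern in this patch: each formerly TCP-private helper becomes a generic inet_csk_*/inet_* function that takes the protocol's state (an inet_hashinfo, or the sock itself) as a parameter, and TCP keeps a one-line wrapper that supplies tcp_hashinfo. A minimal sketch of that delegation, built only from the signatures visible in the diff above; the dccp_hashinfo reuse at the end is an illustrative assumption, not part of this patch:

/* Generic helper: the hash tables are a parameter, not a hard-coded global. */
int inet_csk_get_port(struct inet_hashinfo *hashinfo,
                      struct sock *sk, unsigned short snum);

/* TCP's .get_port method shrinks to a wrapper around the generic code. */
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&tcp_hashinfo, sk, snum);
}

/* A hypothetical second user (e.g. DCCP) would plug in its own tables. */
static int dccp_v4_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&dccp_hashinfo, sk, snum);
}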