path: root/net/ipv4/tcp_ipv4.c
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  210
1 file changed, 1 insertion(+), 209 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 2cd41265d17f..2f605b9e6b67 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -97,138 +97,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.port_rover	= 1024 - 1,
 };
 
-/*
- * This array holds the first and last local port number.
- * For high-usage systems, use sysctl to change this to
- * 32768-61000
- */
-int sysctl_local_port_range[2] = { 1024, 4999 };
-
-static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
-{
-	const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
-	struct sock *sk2;
-	struct hlist_node *node;
-	int reuse = sk->sk_reuse;
-
-	sk_for_each_bound(sk2, node, &tb->owners) {
-		if (sk != sk2 &&
-		    !inet_v6_ipv6only(sk2) &&
-		    (!sk->sk_bound_dev_if ||
-		     !sk2->sk_bound_dev_if ||
-		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
-				const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
-				    sk2_rcv_saddr == sk_rcv_saddr)
-					break;
-			}
-		}
-	}
-	return node != NULL;
-}
-
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- */
-int inet_csk_get_port(struct inet_hashinfo *hashinfo,
-		      struct sock *sk, unsigned short snum)
-{
-	struct inet_bind_hashbucket *head;
-	struct hlist_node *node;
-	struct inet_bind_bucket *tb;
-	int ret;
-
-	local_bh_disable();
-	if (!snum) {
-		int low = sysctl_local_port_range[0];
-		int high = sysctl_local_port_range[1];
-		int remaining = (high - low) + 1;
-		int rover;
-
-		spin_lock(&hashinfo->portalloc_lock);
-		if (hashinfo->port_rover < low)
-			rover = low;
-		else
-			rover = hashinfo->port_rover;
-		do {
-			rover++;
-			if (rover > high)
-				rover = low;
-			head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
-			spin_lock(&head->lock);
-			inet_bind_bucket_for_each(tb, node, &head->chain)
-				if (tb->port == rover)
-					goto next;
-			break;
-		next:
-			spin_unlock(&head->lock);
-		} while (--remaining > 0);
-		hashinfo->port_rover = rover;
-		spin_unlock(&hashinfo->portalloc_lock);
-
-		/* Exhausted local port range during search? It is not
-		 * possible for us to be holding one of the bind hash
-		 * locks if this test triggers, because if 'remaining'
-		 * drops to zero, we broke out of the do/while loop at
-		 * the top level, not from the 'break;' statement.
-		 */
-		ret = 1;
-		if (unlikely(remaining <= 0))
-			goto fail;
-
-		/* OK, here is the one we will use. HEAD is
-		 * non-NULL and we hold it's mutex.
-		 */
-		snum = rover;
-	} else {
-		head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
-		spin_lock(&head->lock);
-		inet_bind_bucket_for_each(tb, node, &head->chain)
-			if (tb->port == snum)
-				goto tb_found;
-	}
-	tb = NULL;
-	goto tb_not_found;
-tb_found:
-	if (!hlist_empty(&tb->owners)) {
-		if (sk->sk_reuse > 1)
-			goto success;
-		if (tb->fastreuse > 0 &&
-		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
-			goto success;
-		} else {
-			ret = 1;
-			if (inet_csk_bind_conflict(sk, tb))
-				goto fail_unlock;
-		}
-	}
-tb_not_found:
-	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
-		goto fail_unlock;
-	if (hlist_empty(&tb->owners)) {
-		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
-			tb->fastreuse = 1;
-		else
-			tb->fastreuse = 0;
-	} else if (tb->fastreuse &&
-		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
-		tb->fastreuse = 0;
-success:
-	if (!inet_csk(sk)->icsk_bind_hash)
-		inet_bind_hash(sk, tb, snum);
-	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
-	ret = 0;
-
-fail_unlock:
-	spin_unlock(&head->lock);
-fail:
-	local_bh_enable();
-	return ret;
-}
-
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	return inet_csk_get_port(&tcp_hashinfo, sk, snum);
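
[Editorial note: the rover logic removed above is the generic bind(2) port allocator: with snum == 0, inet_csk_get_port() walks a rover through the sysctl_local_port_range interval until it finds a bind bucket with no owner. A minimal user-space sketch of the path that exercises it (my illustration, not part of this patch): binding with sin_port = 0 asks the kernel to select any available local port, and getsockname() reports the one the allocator settled on.]

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;	/* 0 == "select any available local port" */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    getsockname(fd, (struct sockaddr *)&addr, &len) < 0) {
		perror("bind/getsockname");
		return 1;
	}
	printf("kernel chose ephemeral port %u\n", ntohs(addr.sin_port));
	close(fd);
	return 0;
}
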
@@ -568,52 +436,6 @@ static inline int inet_iif(const struct sk_buff *skb)
 	return ((struct rtable *)skb->dst)->rt_iif;
 }
 
-static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
-				 const u32 rnd, const u16 synq_hsize)
-{
-	return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
-}
-
-struct request_sock *inet_csk_search_req(const struct sock *sk,
-					 struct request_sock ***prevp,
-					 const __u16 rport, const __u32 raddr,
-					 const __u32 laddr)
-{
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	struct request_sock *req, **prev;
-
-	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
-						    lopt->nr_table_entries)];
-	     (req = *prev) != NULL;
-	     prev = &req->dl_next) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-
-		if (ireq->rmt_port == rport &&
-		    ireq->rmt_addr == raddr &&
-		    ireq->loc_addr == laddr &&
-		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			BUG_TRAP(!req->sk);
-			*prevp = prev;
-			break;
-		}
-	}
-
-	return req;
-}
-
-static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
-				     lopt->hash_rnd, lopt->nr_table_entries);
-
-	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
-	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
-}
-
-
 /*
  * This routine does path mtu discovery as defined in RFC1191.
  */
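
[Editorial note: the removed inet_synq_hash() reduces jhash_2words() to a syn-queue bucket index with `& (synq_hsize - 1)`, which is only equivalent to a modulo when the table size is a power of two. A toy stand-in to show the masking; the mixing function below is illustrative only, not the kernel's Jenkins hash.]

#include <stdint.h>
#include <stdio.h>

/* Illustrative 2-word hash; jhash_2words() in the kernel mixes far better. */
static uint32_t toy_hash_2words(uint32_t a, uint32_t b, uint32_t rnd)
{
	uint32_t h = a ^ b ^ rnd;

	h ^= h >> 16;		/* cheap avalanche, for illustration only */
	h *= 0x45d9f3bU;
	h ^= h >> 16;
	return h;
}

int main(void)
{
	const uint16_t synq_hsize = 512;	/* must be a power of two */
	const uint32_t raddr = 0xc0a80001U;	/* 192.168.0.1 */
	const uint16_t rport = 12345;
	const uint32_t rnd = 0xdeadbeefU;	/* per-listener hash_rnd */

	printf("bucket %u of %u\n",
	       toy_hash_2words(raddr, rport, rnd) & (synq_hsize - 1),
	       synq_hsize);
	return 0;
}
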
@@ -963,36 +785,6 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 			req->ts_recent);
 }
 
-struct dst_entry* inet_csk_route_req(struct sock *sk,
-				     const struct request_sock *req)
-{
-	struct rtable *rt;
-	const struct inet_request_sock *ireq = inet_rsk(req);
-	struct ip_options *opt = inet_rsk(req)->opt;
-	struct flowi fl = { .oif = sk->sk_bound_dev_if,
-			    .nl_u = { .ip4_u =
-				      { .daddr = ((opt && opt->srr) ?
-						  opt->faddr :
-						  ireq->rmt_addr),
-					.saddr = ireq->loc_addr,
-					.tos = RT_CONN_FLAGS(sk) } },
-			    .proto = sk->sk_protocol,
-			    .uli_u = { .ports =
-				       { .sport = inet_sk(sk)->sport,
-					 .dport = ireq->rmt_port } } };
-
-	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
-		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-		return NULL;
-	}
-	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
-		ip_rt_put(rt);
-		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-		return NULL;
-	}
-	return &rt->u.dst;
-}
-
 /*
  * Send a SYN-ACK after having received an ACK.
  * This still operates on a request_sock only, not on a big
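
[Editorial note: in the removed inet_csk_route_req(), the flow's destination is the source-route option's first hop when one is present, otherwise the peer itself; afterwards, a strict source route whose lookup resolves through anything but the recorded gateway is rejected. A simplified model with hypothetical types, just to isolate the destination selection.]

#include <stdint.h>
#include <stdio.h>

struct toy_ip_options {		/* hypothetical stand-in for struct ip_options */
	unsigned int srr:1;	/* a source-route option is present */
	uint32_t faddr;		/* first hop recorded in that option */
};

/* Mirrors the .daddr initializer above: route to the source-route first hop
 * if one exists, otherwise straight to the peer. */
static uint32_t route_daddr(const struct toy_ip_options *opt, uint32_t rmt_addr)
{
	return (opt && opt->srr) ? opt->faddr : rmt_addr;
}

int main(void)
{
	struct toy_ip_options opt = { .srr = 1, .faddr = 0x0a000001U };

	printf("with srr:    0x%x\n", (unsigned)route_daddr(&opt, 0xc0a80001U));
	printf("without srr: 0x%x\n", (unsigned)route_daddr(NULL, 0xc0a80001U));
	return 0;
}
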
@@ -1222,7 +1014,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (want_cookie) {
 		reqsk_free(req);
 	} else {
-		tcp_v4_synq_add(sk, req);
+		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	}
 	return 0;
 
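
[Editorial note: the one-line change above swaps the private tcp_v4_synq_add() for a generic helper. Judging purely from the body removed earlier in this diff, the replacement should combine the same two steps with the timeout now passed by the caller. A hedged reconstruction, inferred from the removed code rather than quoted from net/ipv4/inet_connection_sock.c:]

/* Sketch only: assumes inet_synq_hash() moved to the generic code with it. */
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr,
				     inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	/* Hash the request into the listener's syn table, then bump the
	 * accept-queue accounting, exactly as tcp_v4_synq_add() did. */
	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
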