Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--	net/ipv4/inet_hashtables.c	73
1 file changed, 46 insertions(+), 27 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 625cc5f64c94..2b79377b468d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -64,7 +64,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 
 	atomic_inc(&hashinfo->bsockets);
 
-	inet_sk(sk)->num = snum;
+	inet_sk(sk)->inet_num = snum;
 	sk_add_bind_node(sk, &tb->owners);
 	tb->num_owners++;
 	inet_csk(sk)->icsk_bind_hash = tb;
@@ -76,7 +76,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 static void __inet_put_port(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num,
+	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
 			hashinfo->bhash_size);
 	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
 	struct inet_bind_bucket *tb;
@@ -88,7 +88,7 @@ static void __inet_put_port(struct sock *sk)
 	__sk_del_bind_node(sk);
 	tb->num_owners--;
 	inet_csk(sk)->icsk_bind_hash = NULL;
-	inet_sk(sk)->num = 0;
+	inet_sk(sk)->inet_num = 0;
 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
 	spin_unlock(&head->lock);
 }
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(inet_put_port);
 void __inet_inherit_port(struct sock *sk, struct sock *child)
 {
 	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
-	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num,
+	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->inet_num,
 			table->bhash_size);
 	struct inet_bind_hashbucket *head = &table->bhash[bhash];
 	struct inet_bind_bucket *tb;
@@ -126,9 +126,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
 	int score = -1;
 	struct inet_sock *inet = inet_sk(sk);
 
-	if (net_eq(sock_net(sk), net) && inet->num == hnum &&
+	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
 			!ipv6_only_sock(sk)) {
-		__be32 rcv_saddr = inet->rcv_saddr;
+		__be32 rcv_saddr = inet->inet_rcv_saddr;
 		score = sk->sk_family == PF_INET ? 1 : 0;
 		if (rcv_saddr) {
 			if (rcv_saddr != daddr)
@@ -209,7 +209,7 @@ struct sock * __inet_lookup_established(struct net *net,
 	 * have wildcards anyways.
 	 */
 	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
-	unsigned int slot = hash & (hashinfo->ehash_size - 1);
+	unsigned int slot = hash & hashinfo->ehash_mask;
 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 
 	rcu_read_lock();
@@ -273,18 +273,20 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
 	struct inet_sock *inet = inet_sk(sk);
-	__be32 daddr = inet->rcv_saddr;
-	__be32 saddr = inet->daddr;
+	__be32 daddr = inet->inet_rcv_saddr;
+	__be32 saddr = inet->inet_daddr;
 	int dif = sk->sk_bound_dev_if;
 	INET_ADDR_COOKIE(acookie, saddr, daddr)
-	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
+	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
 	struct net *net = sock_net(sk);
-	unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
+	unsigned int hash = inet_ehashfn(net, daddr, lport,
+					 saddr, inet->inet_dport);
 	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
 	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw;
+	int twrefcnt = 0;
 
 	spin_lock(lock);
 
@@ -312,25 +314,28 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 unique:
 	/* Must record num and sport now. Otherwise we will see
 	 * in hash table socket with a funny identity. */
-	inet->num = lport;
-	inet->sport = htons(lport);
+	inet->inet_num = lport;
+	inet->inet_sport = htons(lport);
 	sk->sk_hash = hash;
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
+	if (tw) {
+		twrefcnt = inet_twsk_unhash(tw);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+	}
 	spin_unlock(lock);
+	if (twrefcnt)
+		inet_twsk_put(tw);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	if (twp) {
 		*twp = tw;
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
-
 	return 0;
 
 not_unique:
@@ -341,16 +346,18 @@ not_unique:
 static inline u32 inet_sk_port_offset(const struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
-					  inet->dport);
+	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
+					  inet->inet_daddr,
+					  inet->inet_dport);
 }
 
-void __inet_hash_nolisten(struct sock *sk)
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	spinlock_t *lock;
 	struct inet_ehash_bucket *head;
+	int twrefcnt = 0;
 
 	WARN_ON(!sk_unhashed(sk));
 
@@ -361,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk)
 
 	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
+	if (tw) {
+		WARN_ON(sk->sk_hash != tw->tw_hash);
+		twrefcnt = inet_twsk_unhash(tw);
+	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
@@ -372,7 +384,7 @@ static void __inet_hash(struct sock *sk)
 	struct inet_listen_hashbucket *ilb;
 
 	if (sk->sk_state != TCP_LISTEN) {
-		__inet_hash_nolisten(sk);
+		__inet_hash_nolisten(sk, NULL);
 		return;
 	}
 
@@ -421,14 +433,15 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct sock *sk, u32 port_offset,
 		int (*check_established)(struct inet_timewait_death_row *,
 			struct sock *, __u16, struct inet_timewait_sock **),
-		void (*hash)(struct sock *sk))
+		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
-	const unsigned short snum = inet_sk(sk)->num;
+	const unsigned short snum = inet_sk(sk)->inet_num;
 	struct inet_bind_hashbucket *head;
 	struct inet_bind_bucket *tb;
 	int ret;
 	struct net *net = sock_net(sk);
+	int twrefcnt = 1;
 
 	if (!snum) {
 		int i, remaining, low, high, port;
@@ -452,7 +465,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 			 * unique enough.
 			 */
 			inet_bind_bucket_for_each(tb, node, &head->chain) {
-				if (ib_net(tb) == net && tb->port == port) {
+				if (net_eq(ib_net(tb), net) &&
+				    tb->port == port) {
 					if (tb->fastreuse >= 0)
 						goto next_port;
 					WARN_ON(hlist_empty(&tb->owners));
@@ -485,14 +499,19 @@ ok:
 		/* Head lock still held and bh's disabled */
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
-			inet_sk(sk)->sport = htons(port);
-			hash(sk);
+			inet_sk(sk)->inet_sport = htons(port);
+			twrefcnt += hash(sk, tw);
 		}
+		if (tw)
+			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
 		spin_unlock(&head->lock);
 
 		if (tw) {
 			inet_twsk_deschedule(tw, death_row);
-			inet_twsk_put(tw);
+			while (twrefcnt) {
+				twrefcnt--;
+				inet_twsk_put(tw);
+			}
 		}
 
 		ret = 0;
@@ -503,7 +522,7 @@ ok:
 	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		hash(sk);
+		hash(sk, NULL);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
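
The hunks above follow one pattern: while a hash-chain lock is held, a timewait socket is only unhashed and the number of detached references is recorded in twrefcnt; the matching inet_twsk_put() calls are deferred until the lock has been dropped. The following is a minimal userspace sketch of that deferred-put idea, not kernel code; tw_obj, tw_unhash() and tw_put() are hypothetical stand-ins for inet_timewait_sock, inet_twsk_unhash() and inet_twsk_put().

/*
 * Illustrative sketch only: model of "unhash under the lock, transfer
 * reference counts to the caller, drop them outside the lock", mirroring
 * the while (twrefcnt) { twrefcnt--; inet_twsk_put(tw); } loop in
 * __inet_hash_connect(). All names here are made up for the example.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tw_obj {
	int refcnt;		/* pretend reference counter */
	int hashed;		/* 1 while "in the hash table" */
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach under the lock; report how many references the caller now owns. */
static int tw_unhash(struct tw_obj *tw)
{
	if (!tw->hashed)
		return 0;
	tw->hashed = 0;
	return 1;		/* one reference transferred to the caller */
}

/* Drop one reference; free the object when the count reaches zero. */
static void tw_put(struct tw_obj *tw)
{
	if (--tw->refcnt == 0) {
		printf("tw object freed\n");
		free(tw);
	}
}

int main(void)
{
	struct tw_obj *tw = calloc(1, sizeof(*tw));
	int twrefcnt = 1;	/* reference already held by this caller */

	tw->refcnt = 2;		/* caller's reference + hash table's reference */
	tw->hashed = 1;

	pthread_mutex_lock(&chain_lock);
	twrefcnt += tw_unhash(tw);	/* only count references while locked */
	pthread_mutex_unlock(&chain_lock);

	while (twrefcnt) {		/* drop them outside the lock */
		twrefcnt--;
		tw_put(tw);
	}
	return 0;
}

Doing the puts after the unlock keeps the potentially slow freeing work out of the locked section, which is the point of returning twrefcnt from the hash callback instead of calling the put routine directly.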