author		David S. Miller <davem@davemloft.net>	2010-11-30 14:53:55 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-30 14:53:55 -0500
commit		582a72da9a41be9227dc931d728ae2906880a589 (patch)
tree		0c1943d6c5eabdbfef6560ac49db322d4becf43d
parent		98158f5a853cafd33b254ae0eacc0dd69f90b93b (diff)
inetpeer: Introduce inet_peer_address_t.
Currently only the v4 aspect is used, but this will change.
Signed-off-by: David S. Miller <davem@davemloft.net>
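
For illustration, a minimal sketch of how the new union type is meant to be keyed by address family. Both helpers below are hypothetical and not part of this commit, which only ever touches the a4 member and never initializes family:

	/* Hypothetical helpers sketching the intended use of inet_peer_address_t.
	 * This commit itself reads and writes only daddr.a4. */
	static inline void inet_peer_addr_set_v4(inet_peer_address_t *a, __be32 ip)
	{
		a->a4 = ip;			/* overlays a6[0] in the union */
		a->family = AF_INET;
	}

	static inline void inet_peer_addr_set_v6(inet_peer_address_t *a,
						 const __be32 *ip6)
	{
		memcpy(a->a6, ip6, sizeof(a->a6));	/* all four words */
		a->family = AF_INET6;
	}

Because a4 and a6[0] share storage, v4-only code can keep comparing a single __be32 while the same field grows into a v6-capable key later.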
 include/net/inetpeer.h | 10 +++++++++-
 net/ipv4/inetpeer.c    | 16 ++++++++--------
 net/ipv4/tcp_ipv4.c    |  2 +-
 3 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index fe239bfe5f7f..d7e60792d76e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -13,10 +13,18 @@
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 
+typedef struct {
+	union {
+		__be32		a4;
+		__be32		a6[4];
+	};
+	__u16	family;
+} inet_peer_address_t;
+
 struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer __rcu	*avl_left, *avl_right;
-	__be32			v4daddr;	/* peer's address */
+	inet_peer_address_t	daddr;
 	__u32			avl_height;
 	struct list_head	unused;
 	__u32			dtime;		/* the time of last use of not
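
Since a4 overlays a6[0], every v4-only call site converts mechanically; a sketch of the equivalence, assuming p points to a struct inet_peer:

	__be32 ip;

	ip = p->daddr.a4;	/* reads the same 32 bits the old p->v4daddr did */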
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f94400848921..893f998efdbb 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
- *		v4daddr: unchangeable
+ *		daddr: unchangeable
  *		ip_id_count: atomic value (no lock needed)
  */
 
@@ -165,9 +165,9 @@ static void unlink_from_unused(struct inet_peer *p)
 	for (u = rcu_dereference_protected(_base->root,		\
 			lockdep_is_held(&_base->lock));		\
 	     u != peer_avl_empty; ) {				\
-		if (_daddr == u->v4daddr)			\
+		if (_daddr == u->daddr.a4)			\
 			break;					\
-		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
+		if ((__force __u32)_daddr < (__force __u32)u->daddr.a4)	\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
@@ -191,7 +191,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		if (daddr == u->v4daddr) {
+		if (daddr == u->daddr.a4) {
 			/* Before taking a reference, check if this entry was
 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
 			 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +201,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base
 				u = NULL;
 			return u;
 		}
-		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+		if ((__force __u32)daddr < (__force __u32)u->daddr.a4)
 			u = rcu_dereference_bh(u->avl_left);
 		else
 			u = rcu_dereference_bh(u->avl_right);
@@ -354,7 +354,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
 		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
-		if (lookup(p->v4daddr, stack, base) != p)
+		if (lookup(p->daddr.a4, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
 		if (p->avl_left == peer_avl_empty_rcu) {
@@ -367,7 +367,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 			BUG_ON(rcu_dereference_protected(*stackptr[-1],
 					lockdep_is_held(&base->lock)) != t);
 			**--stackptr = t->avl_left;
-			/* t is removed, t->v4daddr > x->v4daddr for any
+			/* t is removed, t->daddr > x->daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
 			RCU_INIT_POINTER(*delp[0], t);
@@ -479,7 +479,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
-		p->v4daddr = daddr;
+		p->daddr.a4 = daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
 		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
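
All of the comparisons above still look only at daddr.a4. For a sense of where the layout is headed, here is a hypothetical family-aware comparator; inet_peer_addr_cmp does not exist in this commit and is only a sketch:

	/* Hypothetical: order two keys once family is populated. A v4 key
	 * occupies one __be32 word (a6[0] aliases a4), a v6 key all four. */
	static int inet_peer_addr_cmp(const inet_peer_address_t *a,
				      const inet_peer_address_t *b)
	{
		int i, n = (a->family == AF_INET) ? 1 : 4;

		for (i = 0; i < n; i++) {
			if ((__force __u32)a->a6[i] < (__force __u32)b->a6[i])
				return -1;
			if ((__force __u32)a->a6[i] > (__force __u32)b->a6[i])
				return 1;
		}
		return 0;
	}

A tree keyed this way would replace the open-coded equality and less-than tests on daddr.a4 in the lookup macro with checks on the comparator's result.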
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 69ccbc1dde9c..b8bbf89409b0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1347,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	    tcp_death_row.sysctl_tw_recycle &&
 	    (dst = inet_csk_route_req(sk, req)) != NULL &&
 	    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-	    peer->v4daddr == saddr) {
+	    peer->daddr.a4 == saddr) {
 		inet_peer_refcheck(peer);
 		if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 		    (s32)(peer->tcp_ts - req->ts_recent) >