author     David S. Miller <davem@davemloft.net>  2010-11-30 15:08:53 -0500
committer  David S. Miller <davem@davemloft.net>  2010-11-30 15:08:53 -0500
commit     026630450244b8f8d1baf54548be0800aa1823ed (patch)
tree       073336c16d43756018592a5cd3714aafab6a4582 /net
parent     b534ecf1cd26f094497da6ae28a6ab64cdbe1617 (diff)
inetpeer: Abstract address comparisons.

Now v4 and v6 addresses will both work properly.

Signed-off-by: David S. Miller <davem@davemloft.net>
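The patch routes every tree comparison through one helper so that IPv4 and IPv6 keys are ordered the same way. Below is a minimal userspace sketch of that lexicographic walk, assuming the inet_peer_address_t layout of this series (a4 overlaying a6[0], with family selecting how many 32-bit words are significant); the stand-in type name peer_addr_t and the test in main() are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>		/* AF_INET, AF_INET6 */

/* Userspace stand-in for the kernel's inet_peer_address_t; the real
 * definition lives in include/net/inetpeer.h, and this layout is an
 * assumption for illustration: a4 shares storage with a6[0].
 */
typedef struct {
	union {
		uint32_t a4;
		uint32_t a6[4];
	};
	uint16_t family;
} peer_addr_t;

/* Same logic as the addr_compare() added by this patch: compare one
 * 32-bit word for AF_INET, four words for AF_INET6.
 */
static int addr_compare(const peer_addr_t *a, const peer_addr_t *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->a6[i] == b->a6[i])
			continue;
		if (a->a6[i] < b->a6[i])
			return -1;
		return 1;
	}
	return 0;
}

int main(void)
{
	peer_addr_t x = { .a6 = { 1, 2, 3, 4 }, .family = AF_INET6 };
	peer_addr_t y = { .a6 = { 1, 2, 9, 0 }, .family = AF_INET6 };

	printf("%d\n", addr_compare(&x, &y));	/* -1: first difference at word 2 */
	return 0;
}

Because the kernel version compares raw __be32 words (as the old code already did via __force casts), the resulting order depends on byte order; that is fine here, since the AVL tree only needs any total order that insert and lookup agree on.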
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/inetpeer.c  35
1 file changed, 27 insertions, 8 deletions
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9aa76b8dd490..c96dc51c2e49 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -152,6 +152,22 @@ static void unlink_from_unused(struct inet_peer *p)
 	}
 }
 
+static int addr_compare(const inet_peer_address_t *a,
+			const inet_peer_address_t *b)
+{
+	int i, n = (a->family == AF_INET ? 1 : 4);
+
+	for (i = 0; i < n; i++) {
+		if (a->a6[i] == b->a6[i])
+			continue;
+		if (a->a6[i] < b->a6[i])
+			return -1;
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Called with local BH disabled and the pool lock held.
  */
@@ -165,9 +181,10 @@ static void unlink_from_unused(struct inet_peer *p)
 	for (u = rcu_dereference_protected(_base->root,		\
 		lockdep_is_held(&_base->lock));				\
 	     u != peer_avl_empty; ) {					\
-		if (_daddr == u->daddr.a4)				\
+		int cmp = addr_compare(_daddr, &u->daddr);		\
+		if (cmp == 0)						\
 			break;						\
-		if ((__force __u32)_daddr < (__force __u32)u->daddr.a4)	\
+		if (cmp == -1)						\
 			v = &u->avl_left;				\
 		else							\
 			v = &u->avl_right;				\
@@ -185,13 +202,15 @@ static void unlink_from_unused(struct inet_peer *p)
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu_bh(const inet_peer_address_t *daddr,
+					struct inet_peer_base *base)
 {
 	struct inet_peer *u = rcu_dereference_bh(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		if (daddr == u->daddr.a4) {
+		int cmp = addr_compare(daddr, &u->daddr);
+		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
 			 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +220,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base
 				u = NULL;
 			return u;
 		}
-		if ((__force __u32)daddr < (__force __u32)u->daddr.a4)
+		if (cmp == -1)
 			u = rcu_dereference_bh(u->avl_left);
 		else
 			u = rcu_dereference_bh(u->avl_right);
@@ -354,7 +373,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
 		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
-		if (lookup(p->daddr.a4, stack, base) != p)
+		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
 		if (p->avl_left == peer_avl_empty_rcu) {
@@ -454,7 +473,7 @@ struct inet_peer *inet_getpeer(inet_peer_address_t *daddr, int create)
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock_bh();
-	p = lookup_rcu_bh(daddr->a4, base);
+	p = lookup_rcu_bh(daddr, base);
 	rcu_read_unlock_bh();
 
 	if (p) {
@@ -469,7 +488,7 @@ struct inet_peer *inet_getpeer(inet_peer_address_t *daddr, int create)
 	 * At least, nodes should be hot in our cache.
 	 */
 	spin_lock_bh(&base->lock);
-	p = lookup(daddr->a4, stack, base);
+	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
 		atomic_inc(&p->refcnt);
 		spin_unlock_bh(&base->lock);
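With inet_getpeer() now keyed on an inet_peer_address_t (see its signature in the hunk headers above), an IPv4 caller builds the key explicitly instead of passing a bare __be32. A caller-side sketch; the wrapper name get_v4_peer() is hypothetical, and the field names assume the same union layout described earlier:

#include <net/inetpeer.h>

/* Hypothetical wrapper showing the caller-side change: fill the union
 * key, tag it with the address family, then look up or create the peer.
 */
static struct inet_peer *get_v4_peer(__be32 ip)
{
	inet_peer_address_t daddr;

	daddr.a4 = ip;			/* occupies the first word of a6[] */
	daddr.family = AF_INET;		/* addr_compare() then scans one word */

	return inet_getpeer(&daddr, 1);	/* create == 1: insert if not found */
}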