author     Pavel Emelyanov <xemul@openvz.org>      2007-11-13 00:27:28 -0500
committer  David S. Miller <davem@davemloft.net>   2007-11-13 00:27:28 -0500
commit     d71209ded2ba6010070d02005384897c59859d00 (patch)
tree       a812e34e54e36f74ce9ef61f6e9e42100d0a6ef4 /net
parent     22649d1afbe6988688a07fd70abb06f1e2213567 (diff)
[INET]: Use list_head-s in inetpeer.c
The inetpeer.c code tracks the LRU list of inet_peer-s, but maintains
it by hand. Use list_head-s for this instead.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
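
For readers unfamiliar with the pattern: <linux/list.h> provides an intrusive,
circular doubly-linked list whose links are embedded in the containing object,
so a single list_head field replaces the hand-maintained unused_next and
unused_prevp pointers plus the separate head and tail variables. Below is a
minimal user-space sketch of that pattern; the simplified helpers and the
struct peer type are illustrative stand-ins written for this note, not the
kernel implementation.

	/* Minimal user-space sketch of the <linux/list.h> intrusive list
	 * pattern this patch adopts. Simplified for illustration. */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

	static void INIT_LIST_HEAD(struct list_head *list)
	{
		list->next = list->prev = list;	/* self-linked == empty */
	}

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	/* Delete and re-initialize, so deleting again later is harmless. */
	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		INIT_LIST_HEAD(entry);
	}

	static int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_first_entry(head, type, member) \
		container_of((head)->next, type, member)

	/* Stand-in for struct inet_peer: only what the LRU logic needs. */
	struct peer {
		unsigned int addr;
		struct list_head unused;  /* replaces unused_next/unused_prevp */
	};

	static LIST_HEAD(unused_peers);	/* replaces head + tail pointer */

	int main(void)
	{
		struct peer a = { .addr = 1 }, b = { .addr = 2 };

		INIT_LIST_HEAD(&a.unused);
		INIT_LIST_HEAD(&b.unused);

		/* inet_putpeer(): refcount hit zero, queue at the LRU tail. */
		list_add_tail(&a.unused, &unused_peers);
		list_add_tail(&b.unused, &unused_peers);

		/* unlink_from_unused(): safe whether or not the peer is queued. */
		list_del_init(&a.unused);
		list_del_init(&a.unused);	/* second call is a no-op */

		/* cleanup_once(): take the oldest entry, if any. */
		if (!list_empty(&unused_peers)) {
			struct peer *p = list_first_entry(&unused_peers,
							  struct peer, unused);
			printf("oldest unused peer: %u\n", p->addr);
		}
		return 0;
	}

list_first_entry() is just container_of() applied to head->next, which is how
the patched cleanup_once() recovers the struct inet_peer from its embedded
unused field.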
Diffstat (limited to 'net')
 net/ipv4/inetpeer.c | 42 +++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 27 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031dfbd0f..af995198f643 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
  * 4. Global variable peer_total is modified under the pool lock.
  * 5. struct inet_peer fields modification:
  *		avl_left, avl_right, avl_parent, avl_height: pool lock
- *		unused_next, unused_prevp: unused node list lock
+ *		unused: unused node list lock
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
 static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
 static void unlink_from_unused(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark it as removed */
-	}
+	list_del_init(&p->unused);
 	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
 	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		__u32 delta = (__u32)jiffies - p->dtime;
+	if (!list_empty(&unused_peers)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	/* Link the node. */
 	link_to_pool(n);
-	n->unused_prevp = NULL;	/* not on the list */
+	INIT_LIST_HEAD(&n->unused);
 	peer_total++;
 	write_unlock_bh(&peer_pool_lock);
 
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
 	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
+		list_add_tail(&p->unused, &unused_peers);
 		p->dtime = (__u32)jiffies;
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);
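
A note on the choice of list_del_init() over plain list_del(): the old code
used p->unused_prevp == NULL as its "not on the list" marker so that
unlink_from_unused() could be called regardless of the peer's state.
list_del_init() keeps that idempotence by re-linking the deleted entry to
itself, after which deleting it again is a harmless no-op and list_empty()
on the entry reports it as unqueued. If the explicit test were ever wanted
back, a hypothetical helper (not part of this patch) could look like:

	/* Hypothetical helper, not in the patch: relies on list_del_init()
	 * leaving the entry self-linked when the peer is not queued. */
	static bool peer_on_unused_list(const struct inet_peer *p)
	{
		return !list_empty(&p->unused);
	}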