author     Eric Dumazet <dada1@cosmosbay.com>            2006-10-13 00:21:06 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2006-10-16 02:14:17 -0400
commit     4663afe2c848e2abc8791202beecf40684f13eb4 (patch)
tree       4d67cf3a9910bb9c224b4495b554560ec438477e
parent     ea614d7f4fb2d436b7a5ee490d1011615f6b38d5 (diff)
[NET]: reduce sizeof(struct inet_peer), cleanup, change in peer_check_expire()
1) shrink struct inet_peer on 64-bit platforms.
-rw-r--r--  include/net/inetpeer.h  17
-rw-r--r--  net/ipv4/inetpeer.c     29
2 files changed, 24 insertions, 22 deletions
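
The inetpeer.h hunk below narrows dtime from unsigned long to __u32. On an LP64 kernel (8-byte longs and pointers) the 4-byte dtime can then share an 8-byte-aligned slot with the adjacent atomic_t refcnt instead of taking a slot of its own, which is the likely source of the size reduction. A minimal userspace sketch of that effect, using a simplified, hypothetical stand-in for the struct (peer_before/peer_after and their field set are illustrative, not the real kernel layout):

/* Illustrative sketch, not part of the patch: a simplified model of
 * struct inet_peer showing why a 4-byte dtime packs next to refcnt on
 * LP64 while an 8-byte dtime forces its own slot. */
#include <stdio.h>
#include <stdint.h>

struct peer_before {
	void *avl_left, *avl_right;
	void *unused_next, **unused_prevp;
	unsigned long dtime;		/* 8 bytes on LP64 */
	int refcnt;			/* stand-in for atomic_t */
	uint32_t v4daddr;
	uint16_t avl_height;
};

struct peer_after {
	void *avl_left, *avl_right;
	void *unused_next, **unused_prevp;
	uint32_t dtime;			/* 4 bytes: shares a slot with refcnt */
	int refcnt;
	uint32_t v4daddr;
	uint16_t avl_height;
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct peer_before), sizeof(struct peer_after));
	return 0;
}
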
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 925573fd2aed..f13cc0c2b163 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -19,7 +19,7 @@ struct inet_peer
 {
 	struct inet_peer	*avl_left, *avl_right;
 	struct inet_peer	*unused_next, **unused_prevp;
-	unsigned long		dtime;		/* the time of last use of not
+	__u32			dtime;		/* the time of last use of not
 						 * referenced entries */
 	atomic_t		refcnt;
 	__be32			v4daddr;	/* peer's address */
@@ -35,21 +35,8 @@ void inet_initpeers(void) __init;
 /* can be called with or without local BH being disabled */
 struct inet_peer	*inet_getpeer(__be32 daddr, int create);
 
-extern spinlock_t inet_peer_unused_lock;
-extern struct inet_peer **inet_peer_unused_tailp;
 /* can be called from BH context or outside */
-static inline void	inet_putpeer(struct inet_peer *p)
-{
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
-		p->dtime = jiffies;
-	}
-	spin_unlock_bh(&inet_peer_unused_lock);
-}
+extern void inet_putpeer(struct inet_peer *p);
 
 extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
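
In the inetpeer.c changes below, cleanup_once() stops testing time_after() on an unsigned long dtime and instead ages entries with an unsigned 32-bit subtraction, (__u32)jiffies - p->dtime. Modular arithmetic keeps that delta correct even when the low 32 bits of jiffies wrap, as long as an entry's real age stays under 2^32 ticks. A small standalone sketch of the idea, with made-up values and a hypothetical is_fresh() helper:

/* Illustrative sketch, not from the patch: unsigned 32-bit subtraction
 * yields the elapsed ticks across a counter wrap, so "delta < ttl" still
 * identifies fresh entries. */
#include <stdio.h>
#include <stdint.h>

static int is_fresh(uint32_t now, uint32_t dtime, uint32_t ttl)
{
	uint32_t delta = now - dtime;	/* elapsed ticks, modulo 2^32 */

	return delta < ttl;
}

int main(void)
{
	/* dtime stamped just before the counter wrapped, "now" just after it */
	uint32_t dtime = 0xfffffff0u, now = 5, ttl = 120;

	printf("delta=%u fresh=%d\n", (unsigned)(now - dtime),
	       is_fresh(now, dtime, ttl));
	return 0;
}
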
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 2b1a54b59c48..f072f3875af8 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ; /* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */
 
 static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function. */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
 	spin_lock_bh(&inet_peer_unused_lock);
 	p = inet_peer_unused_head;
 	if (p != NULL) {
-		if (time_after(p->dtime + ttl, jiffies)) {
+		__u32 delta = (__u32)jiffies - p->dtime;
+		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
@@ -432,7 +431,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-	int i;
+	unsigned long now = jiffies;
 	int ttl;
 
 	if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					peer_total / inet_peer_threshold * HZ;
-	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+	while (!cleanup_once(ttl)) {
+		if (jiffies != now)
+			break;
+	}
 
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
 			peer_total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+	spin_lock_bh(&inet_peer_unused_lock);
+	if (atomic_dec_and_test(&p->refcnt)) {
+		p->unused_prevp = inet_peer_unused_tailp;
+		p->unused_next = NULL;
+		*inet_peer_unused_tailp = p;
+		inet_peer_unused_tailp = &p->unused_next;
+		p->dtime = (__u32)jiffies;
+	}
+	spin_unlock_bh(&inet_peer_unused_lock);
+}
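
The reworked peer_check_expire() above also drops the fixed PEER_MAX_CLEANUP_WORK budget: it snapshots jiffies in now and keeps calling cleanup_once() only while the clock has not advanced, so each timer run prunes for at most one jiffy. A rough userspace sketch of that time-bounded-batch pattern, with a hypothetical cleanup_one() and backlog standing in for the real cleanup work:

/* Illustrative sketch, not from the patch: bound a cleanup batch by elapsed
 * time rather than by a fixed iteration count. */
#include <stdio.h>
#include <time.h>

static int backlog = 1000000;		/* hypothetical number of stale entries */

static int cleanup_one(void)		/* returns nonzero when nothing is left */
{
	if (backlog == 0)
		return 1;
	backlog--;
	return 0;
}

int main(void)
{
	time_t start = time(NULL);	/* stand-in for "unsigned long now = jiffies" */

	while (!cleanup_one()) {
		if (time(NULL) != start)	/* clock advanced: stop, resume on next timer */
			break;
	}
	printf("entries still pending: %d\n", backlog);
	return 0;
}
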