author     Sven Wegener <sven.wegener@stealer.net>    2008-08-10 14:24:41 -0400
committer  Sven Wegener <sven.wegener@stealer.net>    2008-08-11 08:00:43 -0400
commit     3a14a313f9b406c37ab7e3f855b060eb8587b8c7 (patch)
tree       86dbebf182b9f8dc66cfce4e3defb79fb9e279cb
parent     5587da55fbf332ab8d1b37637536f94bc373867f (diff)
ipvs: Embed estimator object into stats object
There's no reason for dynamically allocating an estimator object for every stats object. Directly embed an estimator object into every stats object and switch to using the kernel-provided list implementation. This makes the code much simpler and faster, as we do not need to traverse the list of all estimators to find the one belonging to a stats object. There's no need to use an rwlock, as we only have one reader.

Also reorder the members of the estimator structure slightly to avoid padding overhead. This can't be done with the stats object, as the members are currently copied to our user space object via memcpy() and changing it would break ABI.

Signed-off-by: Sven Wegener <sven.wegener@stealer.net>
Acked-by: Simon Horman <horms@verge.net.au>
-rw-r--r--  include/net/ip_vs.h        |  28
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c  |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c  | 117
3 files changed, 65 insertions(+), 82 deletions(-)
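The core of the change is the pattern the commit message describes: embed the per-stats estimator directly in struct ip_vs_stats and recover the enclosing object with container_of(), so neither a separate allocation nor an est->stats back-pointer is needed. Below is a minimal user-space sketch of that pattern; the names here (list_node, demo_estimator, demo_stats) are illustrative stand-ins, not the kernel's types.

#include <stddef.h>
#include <stdio.h>

/* Same computation the kernel's container_of() performs. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {			/* stand-in for struct list_head */
	struct list_node *next;
};

struct demo_estimator {
	struct list_node list;		/* hangs on the global estimator list */
	unsigned int cps;
};

struct demo_stats {
	unsigned int conns;
	struct demo_estimator est;	/* embedded, no separate allocation */
};

int main(void)
{
	struct demo_stats s = { .conns = 42 };
	struct list_node *node = &s.est.list;

	/* From the list node back to the estimator ... */
	struct demo_estimator *e = container_of(node, struct demo_estimator, list);
	/* ... and from the estimator back to the enclosing stats object,
	 * which is why a back-pointer to the stats object is not needed. */
	struct demo_stats *owner = container_of(e, struct demo_stats, est);

	printf("conns = %u, cps = %u\n", owner->conns, e->cps);
	return 0;
}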
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index c8ee9b89b023..7312c3dd309f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -140,8 +140,24 @@ struct ip_vs_seq {
 
 
 /*
- *	IPVS statistics object
+ *	IPVS statistics objects
  */
+struct ip_vs_estimator {
+	struct list_head	list;
+
+	u64			last_inbytes;
+	u64			last_outbytes;
+	u32			last_conns;
+	u32			last_inpkts;
+	u32			last_outpkts;
+
+	u32			cps;
+	u32			inpps;
+	u32			outpps;
+	u32			inbps;
+	u32			outbps;
+};
+
 struct ip_vs_stats
 {
 	__u32			conns;		/* connections scheduled */
@@ -156,7 +172,15 @@ struct ip_vs_stats
 	__u32			inbps;		/* current in byte rate */
 	__u32			outbps;		/* current out byte rate */
 
+	/*
+	 * Don't add anything before the lock, because we use memcpy() to copy
+	 * the members before the lock to struct ip_vs_stats_user in
+	 * ip_vs_ctl.c.
+	 */
+
 	spinlock_t		lock;		/* spin lock */
+
+	struct ip_vs_estimator	est;		/* estimator */
 };
 
 struct dst_entry;
@@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
 /*
  *	IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 999d884e8862..d651bce05493 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -684,8 +684,8 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
 	spin_lock_bh(&stats->lock);
 	memset(stats, 0, (char *)&stats->lock - (char *)stats);
-	spin_unlock_bh(&stats->lock);
 	ip_vs_zero_estimator(stats);
+	spin_unlock_bh(&stats->lock);
 }
 
 /*
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 1d6e58e502fd..5a20f93bd7f9 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
+#include <linux/list.h>
 
 #include <net/ip_vs.h>
 
@@ -44,28 +45,11 @@
  */
 
 
-struct ip_vs_estimator
-{
-	struct ip_vs_estimator	*next;
-	struct ip_vs_stats	*stats;
-
-	u32			last_conns;
-	u32			last_inpkts;
-	u32			last_outpkts;
-	u64			last_inbytes;
-	u64			last_outbytes;
-
-	u32			cps;
-	u32			inpps;
-	u32			outpps;
-	u32			inbps;
-	u32			outbps;
-};
-
-
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static void estimation_timer(unsigned long arg);
+
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
 	u64 n_inbytes, n_outbytes;
 	u32 rate;
 
-	read_lock(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		s = e->stats;
+	spin_lock(&est_lock);
+	list_for_each_entry(e, &est_list, list) {
+		s = container_of(e, struct ip_vs_stats, est);
 
 		spin_lock(&s->lock);
 		n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
 		s->outbps = (e->outbps+0xF)>>5;
 		spin_unlock(&s->lock);
 	}
-	read_unlock(&est_lock);
+	spin_unlock(&est_lock);
 	mod_timer(&est_timer, jiffies + 2*HZ);
 }
 
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est;
+	struct ip_vs_estimator *est = &stats->est;
 
-	est = kzalloc(sizeof(*est), GFP_KERNEL);
-	if (est == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&est->list);
 
-	est->stats = stats;
 	est->last_conns = stats->conns;
 	est->cps = stats->cps<<10;
 
@@ -142,62 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
 	est->last_outbytes = stats->outbytes;
 	est->outbps = stats->outbps<<5;
 
-	write_lock_bh(&est_lock);
-	est->next = est_list;
-	if (est->next == NULL) {
-		setup_timer(&est_timer, estimation_timer, 0);
-		est_timer.expires = jiffies + 2*HZ;
-		add_timer(&est_timer);
-	}
-	est_list = est;
-	write_unlock_bh(&est_lock);
-	return 0;
+	spin_lock_bh(&est_lock);
+	if (list_empty(&est_list))
+		mod_timer(&est_timer, jiffies + 2 * HZ);
+	list_add(&est->list, &est_list);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_kill_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est, **pest;
-	int killed = 0;
-
-	write_lock_bh(&est_lock);
-	pest = &est_list;
-	while ((est=*pest) != NULL) {
-		if (est->stats != stats) {
-			pest = &est->next;
-			continue;
-		}
-		*pest = est->next;
-		kfree(est);
-		killed++;
-	}
-	while (killed && !est_list && try_to_del_timer_sync(&est_timer) < 0) {
-		write_unlock_bh(&est_lock);
+	struct ip_vs_estimator *est = &stats->est;
+
+	spin_lock_bh(&est_lock);
+	list_del(&est->list);
+	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+		spin_unlock_bh(&est_lock);
 		cpu_relax();
-		write_lock_bh(&est_lock);
+		spin_lock_bh(&est_lock);
 	}
-	write_unlock_bh(&est_lock);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *e;
+	struct ip_vs_estimator *est = &stats->est;
 
-	write_lock_bh(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		if (e->stats != stats)
-			continue;
-
-		/* set counters zero */
-		e->last_conns = 0;
-		e->last_inpkts = 0;
-		e->last_outpkts = 0;
-		e->last_inbytes = 0;
-		e->last_outbytes = 0;
-		e->cps = 0;
-		e->inpps = 0;
-		e->outpps = 0;
-		e->inbps = 0;
-		e->outbps = 0;
-	}
-	write_unlock_bh(&est_lock);
+	/* set counters zero, caller must hold the stats->lock lock */
+	est->last_inbytes = 0;
+	est->last_outbytes = 0;
+	est->last_conns = 0;
+	est->last_inpkts = 0;
+	est->last_outpkts = 0;
+	est->cps = 0;
+	est->inpps = 0;
+	est->outpps = 0;
+	est->inbps = 0;
+	est->outbps = 0;
 }
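The new ip_vs_new_estimator()/ip_vs_kill_estimator() pair above also keeps the 2-second estimation timer running only while the estimator list is non-empty: the first registration arms it, the last removal stops it (spinning on try_to_del_timer_sync() to avoid racing with a running timer handler). Below is a toy, single-threaded model of that start/stop discipline, with a counter and a flag standing in for the list and the kernel timer; it is an illustration only, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

static int nr_estimators;	/* stand-in for list_empty(&est_list) */
static bool timer_armed;	/* stand-in for the est_timer state */

static void register_estimator(void)
{
	if (nr_estimators++ == 0) {	/* list was empty: arm the timer */
		timer_armed = true;
		printf("timer armed\n");
	}
}

static void unregister_estimator(void)
{
	if (--nr_estimators == 0) {	/* list is now empty: stop the timer */
		timer_armed = false;
		printf("timer stopped\n");
	}
}

int main(void)
{
	register_estimator();	/* arms the timer */
	register_estimator();	/* timer already running, nothing to do */
	unregister_estimator();
	unregister_estimator();	/* last one out stops the timer */
	return 0;
}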