author		Jarek Poplawski <jarkao2@gmail.com>	2008-11-24 18:48:05 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-24 18:48:05 -0500
commit		4db0acf3c0afbbbb2ae35a65f8896ca6655a47ec (patch)
tree		e13b52665a811f9c5ad3621a2408be1b4f70c78c
parent		3f0947c3ffaed33c1c38b79e4b17f75ba072d3e9 (diff)
net: gen_estimator: Fix gen_kill_estimator() lookups
gen_kill_estimator()'s linear list lookups are very slow; for example, soft lockups
were reported while deleting a large number of HTB classes. Here is another try to
fix this problem, this time internally, with an rbtree, so similar to Jamal's
hashing idea IIRC. (Looking up the next hits could still be optimized, but it is
really fast as it is.)

Reported-by: Badalian Vyacheslav <slavon@bigtelecom.ru>
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Acked-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/core/gen_estimator.c	76
1 file changed, 56 insertions(+), 20 deletions(-)
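To illustrate the change from the caller's side, here is a minimal sketch, assuming a hypothetical estimator owner (demo_class and its field names are illustrative, not part of this patch). An estimator is still identified by its (bstats, rate_est) pointer pair; after this patch gen_kill_estimator() resolves that pair through an rbtree keyed on the bstats pointer value instead of scanning every interval list.

#include <net/gen_stats.h>	/* gen_kill_estimator() and the gnet_stats_* types */

/* Hypothetical estimator owner; only the two stats fields matter here. */
struct demo_class {
	struct gnet_stats_basic		bstats;		/* byte/packet counters */
	struct gnet_stats_rate_est	rate_est;	/* estimated bps/pps output */
};

/* Teardown path, called under rtnl_mutex as gen_kill_estimator() requires.
 * With this patch the lookup is an O(log n) rbtree walk keyed on the bstats
 * pointer, so tearing down many classes no longer rescans the per-interval
 * lists for each one. */
static void demo_class_destroy(struct demo_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
}

Comparing raw pointer values is sufficient as a tree key because the rbtree only needs a stable total order for lookups, not a meaningful sort order.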
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe8266be1..80aa160877e9 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/rbtree.h>
 #include <net/sock.h>
 #include <net/gen_stats.h>
 
@@ -89,6 +90,7 @@ struct gen_estimator
 	u32			avpps;
 	u32			avbps;
 	struct rcu_head		e_rcu;
+	struct rb_node		node;
 };
 
 struct gen_estimator_head
@@ -102,6 +104,9 @@ static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 /* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
+/* Protects against soft lockup during large deletion */
+static struct rb_root est_root = RB_ROOT;
+
 static void est_timer(unsigned long arg)
 {
 	int idx = (int)arg;
@@ -139,6 +144,45 @@ skip:
 	rcu_read_unlock();
 }
 
+static void gen_add_node(struct gen_estimator *est)
+{
+	struct rb_node **p = &est_root.rb_node, *parent = NULL;
+
+	while (*p) {
+		struct gen_estimator *e;
+
+		parent = *p;
+		e = rb_entry(parent, struct gen_estimator, node);
+
+		if (est->bstats > e->bstats)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&est->node, parent, p);
+	rb_insert_color(&est->node, &est_root);
+}
+
+static struct gen_estimator *gen_find_node(struct gnet_stats_basic *bstats,
+					   struct gnet_stats_rate_est *rate_est)
+{
+	struct rb_node *p = est_root.rb_node;
+
+	while (p) {
+		struct gen_estimator *e;
+
+		e = rb_entry(p, struct gen_estimator, node);
+
+		if (bstats > e->bstats)
+			p = p->rb_right;
+		else if (bstats < e->bstats || rate_est != e->rate_est)
+			p = p->rb_left;
+		else
+			return e;
+	}
+	return NULL;
+}
+
 /**
  * gen_new_estimator - create a new rate estimator
  * @bstats: basic statistics
@@ -194,6 +238,8 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
+	gen_add_node(est);
+
 	return 0;
 }
 
@@ -209,34 +255,24 @@ static void __gen_kill_estimator(struct rcu_head *head)
  * @bstats: basic statistics
  * @rate_est: rate estimator statistics
  *
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &bstats and &rate_est.
  *
  * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
-	struct gnet_stats_rate_est *rate_est)
+			struct gnet_stats_rate_est *rate_est)
 {
-	int idx;
-	struct gen_estimator *e, *n;
-
-	for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-
-		/* Skip non initialized indexes */
-		if (!elist[idx].timer.function)
-			continue;
+	struct gen_estimator *e;
 
-		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
-			if (e->rate_est != rate_est || e->bstats != bstats)
-				continue;
+	while ((e = gen_find_node(bstats, rate_est))) {
+		rb_erase(&e->node, &est_root);
 
-			write_lock_bh(&est_lock);
-			e->bstats = NULL;
-			write_unlock_bh(&est_lock);
+		write_lock_bh(&est_lock);
+		e->bstats = NULL;
+		write_unlock_bh(&est_lock);
 
-			list_del_rcu(&e->list);
-			call_rcu(&e->e_rcu, __gen_kill_estimator);
-		}
+		list_del_rcu(&e->list);
+		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
 }
 