Diffstat (limited to 'net/sched/act_police.c')
 net/sched/act_police.c | 508
 1 file changed, 260 insertions(+), 248 deletions(-)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index da905d7b4b40..fed47b658837 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -32,43 +32,27 @@
 #include <net/sock.h>
 #include <net/act_api.h>
 
-#define L2T(p,L)   ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
-#define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
-#define PRIV(a) ((struct tcf_police *) (a)->priv)
-
-/* use generic hash table */
-#define MY_TAB_SIZE     16
-#define MY_TAB_MASK     15
-static u32 idx_gen;
-static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
-/* Policer hash table lock */
-static DEFINE_RWLOCK(police_lock);
-
-/* Each policer is serialized by its individual spinlock */
+#define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
+#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
 
-static __inline__ unsigned tcf_police_hash(u32 index)
-{
-	return index&0xF;
-}
-
-static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
-{
-	struct tcf_police *p;
+#define POL_TAB_MASK     15
+static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
+static u32 police_idx_gen;
+static DEFINE_RWLOCK(police_lock);
+
+static struct tcf_hashinfo police_hash_info = {
+	.htab	=	tcf_police_ht,
+	.hmask	=	POL_TAB_MASK,
+	.lock	=	&police_lock,
+};
 
-	read_lock(&police_lock);
-	for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
-		if (p->index == index)
-			break;
-	}
-	read_unlock(&police_lock);
-	return p;
-}
+/* Each policer is serialized by its individual spinlock */
 
 #ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
 				 int type, struct tc_action *a)
 {
-	struct tcf_police *p;
+	struct tcf_common *p;
 	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
 	struct rtattr *r;
 
@@ -76,10 +60,10 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
 
 	s_i = cb->args[0];
 
-	for (i = 0; i < MY_TAB_SIZE; i++) {
-		p = tcf_police_ht[tcf_police_hash(i)];
+	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
+		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
 
-		for (; p; p = p->next) {
+		for (; p; p = p->tcfc_next) {
 			index++;
 			if (index < s_i)
 				continue;
@@ -110,48 +94,26 @@ rtattr_failure:
 	skb_trim(skb, (u8*)r - skb->data);
 	goto done;
 }
-
-static inline int
-tcf_act_police_hash_search(struct tc_action *a, u32 index)
-{
-	struct tcf_police *p = tcf_police_lookup(index);
-
-	if (p != NULL) {
-		a->priv = p;
-		return 1;
-	} else {
-		return 0;
-	}
-}
 #endif
 
-static inline u32 tcf_police_new_index(void)
-{
-	do {
-		if (++idx_gen == 0)
-			idx_gen = 1;
-	} while (tcf_police_lookup(idx_gen));
-
-	return idx_gen;
-}
-
 void tcf_police_destroy(struct tcf_police *p)
 {
-	unsigned h = tcf_police_hash(p->index);
-	struct tcf_police **p1p;
+	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
+	struct tcf_common **p1p;
 
-	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
-		if (*p1p == p) {
+	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
+		if (*p1p == &p->common) {
 			write_lock_bh(&police_lock);
-			*p1p = p->next;
+			*p1p = p->tcf_next;
 			write_unlock_bh(&police_lock);
 #ifdef CONFIG_NET_ESTIMATOR
-			gen_kill_estimator(&p->bstats, &p->rate_est);
+			gen_kill_estimator(&p->tcf_bstats,
+					   &p->tcf_rate_est);
 #endif
-			if (p->R_tab)
-				qdisc_put_rtab(p->R_tab);
-			if (p->P_tab)
-				qdisc_put_rtab(p->P_tab);
+			if (p->tcfp_R_tab)
+				qdisc_put_rtab(p->tcfp_R_tab);
+			if (p->tcfp_P_tab)
+				qdisc_put_rtab(p->tcfp_P_tab);
 			kfree(p);
 			return;
 		}
@@ -167,7 +129,7 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 	int ret = 0, err;
 	struct rtattr *tb[TCA_POLICE_MAX];
 	struct tc_police *parm;
-	struct tcf_police *p;
+	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 
 	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
@@ -185,27 +147,32 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 	    RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
 		return -EINVAL;
 
-	if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
-		a->priv = p;
-		if (bind) {
-			p->bindcnt += 1;
-			p->refcnt += 1;
+	if (parm->index) {
+		struct tcf_common *pc;
+
+		pc = tcf_hash_lookup(parm->index, &police_hash_info);
+		if (pc != NULL) {
+			a->priv = pc;
+			police = to_police(pc);
+			if (bind) {
+				police->tcf_bindcnt += 1;
+				police->tcf_refcnt += 1;
+			}
+			if (ovr)
+				goto override;
+			return ret;
 		}
-		if (ovr)
-			goto override;
-		return ret;
 	}
 
-	p = kzalloc(sizeof(*p), GFP_KERNEL);
-	if (p == NULL)
+	police = kzalloc(sizeof(*police), GFP_KERNEL);
+	if (police == NULL)
 		return -ENOMEM;
-
 	ret = ACT_P_CREATED;
-	p->refcnt = 1;
-	spin_lock_init(&p->lock);
-	p->stats_lock = &p->lock;
+	police->tcf_refcnt = 1;
+	spin_lock_init(&police->tcf_lock);
+	police->tcf_stats_lock = &police->tcf_lock;
 	if (bind)
-		p->bindcnt = 1;
+		police->tcf_bindcnt = 1;
 override:
 	if (parm->rate.rate) {
 		err = -ENOMEM;
@@ -215,67 +182,71 @@ override:
 		if (parm->peakrate.rate) {
 			P_tab = qdisc_get_rtab(&parm->peakrate,
 					       tb[TCA_POLICE_PEAKRATE-1]);
-			if (p->P_tab == NULL) {
+			if (P_tab == NULL) {
 				qdisc_put_rtab(R_tab);
 				goto failure;
 			}
 		}
 	}
 	/* No failure allowed after this point */
-	spin_lock_bh(&p->lock);
+	spin_lock_bh(&police->tcf_lock);
 	if (R_tab != NULL) {
-		qdisc_put_rtab(p->R_tab);
-		p->R_tab = R_tab;
+		qdisc_put_rtab(police->tcfp_R_tab);
+		police->tcfp_R_tab = R_tab;
 	}
 	if (P_tab != NULL) {
-		qdisc_put_rtab(p->P_tab);
-		p->P_tab = P_tab;
+		qdisc_put_rtab(police->tcfp_P_tab);
+		police->tcfp_P_tab = P_tab;
 	}
 
 	if (tb[TCA_POLICE_RESULT-1])
-		p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
-	p->toks = p->burst = parm->burst;
-	p->mtu = parm->mtu;
-	if (p->mtu == 0) {
-		p->mtu = ~0;
-		if (p->R_tab)
-			p->mtu = 255<<p->R_tab->rate.cell_log;
+		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
+	police->tcfp_toks = police->tcfp_burst = parm->burst;
+	police->tcfp_mtu = parm->mtu;
+	if (police->tcfp_mtu == 0) {
+		police->tcfp_mtu = ~0;
+		if (police->tcfp_R_tab)
+			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
 	}
-	if (p->P_tab)
-		p->ptoks = L2T_P(p, p->mtu);
-	p->action = parm->action;
+	if (police->tcfp_P_tab)
+		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
+	police->tcf_action = parm->action;
 
 #ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1])
-		p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
+		police->tcfp_ewma_rate =
+			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
 	if (est)
-		gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
+		gen_replace_estimator(&police->tcf_bstats,
+				      &police->tcf_rate_est,
+				      police->tcf_stats_lock, est);
 #endif
 
-	spin_unlock_bh(&p->lock);
+	spin_unlock_bh(&police->tcf_lock);
 	if (ret != ACT_P_CREATED)
 		return ret;
 
-	PSCHED_GET_TIME(p->t_c);
-	p->index = parm->index ? : tcf_police_new_index();
-	h = tcf_police_hash(p->index);
+	PSCHED_GET_TIME(police->tcfp_t_c);
+	police->tcf_index = parm->index ? parm->index :
+		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
+	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	write_lock_bh(&police_lock);
-	p->next = tcf_police_ht[h];
-	tcf_police_ht[h] = p;
+	police->tcf_next = tcf_police_ht[h];
+	tcf_police_ht[h] = &police->common;
 	write_unlock_bh(&police_lock);
 
-	a->priv = p;
+	a->priv = police;
 	return ret;
 
 failure:
 	if (ret == ACT_P_CREATED)
-		kfree(p);
+		kfree(police);
 	return err;
 }
 
 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
 {
-	struct tcf_police *p = PRIV(a);
+	struct tcf_police *p = a->priv;
 
 	if (p != NULL)
 		return tcf_police_release(p, bind);
@@ -285,86 +256,87 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 			  struct tcf_result *res)
 {
+	struct tcf_police *police = a->priv;
 	psched_time_t now;
-	struct tcf_police *p = PRIV(a);
 	long toks;
 	long ptoks = 0;
 
-	spin_lock(&p->lock);
+	spin_lock(&police->tcf_lock);
 
-	p->bstats.bytes += skb->len;
-	p->bstats.packets++;
+	police->tcf_bstats.bytes += skb->len;
+	police->tcf_bstats.packets++;
 
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
-		p->qstats.overlimits++;
-		spin_unlock(&p->lock);
-		return p->action;
+	if (police->tcfp_ewma_rate &&
+	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+		police->tcf_qstats.overlimits++;
+		spin_unlock(&police->tcf_lock);
+		return police->tcf_action;
 	}
 #endif
 
-	if (skb->len <= p->mtu) {
-		if (p->R_tab == NULL) {
-			spin_unlock(&p->lock);
-			return p->result;
+	if (skb->len <= police->tcfp_mtu) {
+		if (police->tcfp_R_tab == NULL) {
+			spin_unlock(&police->tcf_lock);
+			return police->tcfp_result;
 		}
 
 		PSCHED_GET_TIME(now);
 
-		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
-
-		if (p->P_tab) {
-			ptoks = toks + p->ptoks;
-			if (ptoks > (long)L2T_P(p, p->mtu))
-				ptoks = (long)L2T_P(p, p->mtu);
-			ptoks -= L2T_P(p, skb->len);
+		toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
+					 police->tcfp_burst);
+		if (police->tcfp_P_tab) {
+			ptoks = toks + police->tcfp_ptoks;
+			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
+				ptoks = (long)L2T_P(police, police->tcfp_mtu);
+			ptoks -= L2T_P(police, skb->len);
 		}
-		toks += p->toks;
-		if (toks > (long)p->burst)
-			toks = p->burst;
-		toks -= L2T(p, skb->len);
-
+		toks += police->tcfp_toks;
+		if (toks > (long)police->tcfp_burst)
+			toks = police->tcfp_burst;
+		toks -= L2T(police, skb->len);
 		if ((toks|ptoks) >= 0) {
-			p->t_c = now;
-			p->toks = toks;
-			p->ptoks = ptoks;
-			spin_unlock(&p->lock);
-			return p->result;
+			police->tcfp_t_c = now;
+			police->tcfp_toks = toks;
+			police->tcfp_ptoks = ptoks;
+			spin_unlock(&police->tcf_lock);
+			return police->tcfp_result;
 		}
 	}
 
-	p->qstats.overlimits++;
-	spin_unlock(&p->lock);
-	return p->action;
+	police->tcf_qstats.overlimits++;
+	spin_unlock(&police->tcf_lock);
+	return police->tcf_action;
 }
 
 static int
 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
 	unsigned char *b = skb->tail;
+	struct tcf_police *police = a->priv;
 	struct tc_police opt;
-	struct tcf_police *p = PRIV(a);
-
-	opt.index = p->index;
-	opt.action = p->action;
-	opt.mtu = p->mtu;
-	opt.burst = p->burst;
-	opt.refcnt = p->refcnt - ref;
-	opt.bindcnt = p->bindcnt - bind;
-	if (p->R_tab)
-		opt.rate = p->R_tab->rate;
+
+	opt.index = police->tcf_index;
+	opt.action = police->tcf_action;
+	opt.mtu = police->tcfp_mtu;
+	opt.burst = police->tcfp_burst;
+	opt.refcnt = police->tcf_refcnt - ref;
+	opt.bindcnt = police->tcf_bindcnt - bind;
+	if (police->tcfp_R_tab)
+		opt.rate = police->tcfp_R_tab->rate;
 	else
 		memset(&opt.rate, 0, sizeof(opt.rate));
-	if (p->P_tab)
-		opt.peakrate = p->P_tab->rate;
+	if (police->tcfp_P_tab)
+		opt.peakrate = police->tcfp_P_tab->rate;
 	else
 		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
 	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (p->result)
-		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
+	if (police->tcfp_result)
+		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
+			&police->tcfp_result);
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate)
-		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
+	if (police->tcfp_ewma_rate)
+		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
 #endif
 	return skb->len;
 
@@ -379,13 +351,14 @@ MODULE_LICENSE("GPL");
 
 static struct tc_action_ops act_police_ops = {
 	.kind		=	"police",
+	.hinfo		=	&police_hash_info,
 	.type		=	TCA_ID_POLICE,
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_act_police,
 	.dump		=	tcf_act_police_dump,
 	.cleanup	=	tcf_act_police_cleanup,
-	.lookup		=	tcf_act_police_hash_search,
+	.lookup		=	tcf_hash_search,
 	.init		=	tcf_act_police_locate,
 	.walk		=	tcf_act_police_walker
 };
@@ -407,10 +380,39 @@ module_exit(police_cleanup_module);
 
 #else /* CONFIG_NET_CLS_ACT */
 
-struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
-{
-	unsigned h;
-	struct tcf_police *p;
+static struct tcf_common *tcf_police_lookup(u32 index)
+{
+	struct tcf_hashinfo *hinfo = &police_hash_info;
+	struct tcf_common *p;
+
+	read_lock(hinfo->lock);
+	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
+	     p = p->tcfc_next) {
+		if (p->tcfc_index == index)
+			break;
+	}
+	read_unlock(hinfo->lock);
+
+	return p;
+}
+
+static u32 tcf_police_new_index(void)
+{
+	u32 *idx_gen = &police_idx_gen;
+	u32 val = *idx_gen;
+
+	do {
+		if (++val == 0)
+			val = 1;
+	} while (tcf_police_lookup(val));
+
+	return (*idx_gen = val);
+}
+
+struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
+{
+	unsigned int h;
+	struct tcf_police *police;
 	struct rtattr *tb[TCA_POLICE_MAX];
 	struct tc_police *parm;
 
@@ -423,149 +425,158 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 
 	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
 
-	if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
-		p->refcnt++;
-		return p;
-	}
+	if (parm->index) {
+		struct tcf_common *pc;
 
-	p = kzalloc(sizeof(*p), GFP_KERNEL);
-	if (p == NULL)
+		pc = tcf_police_lookup(parm->index);
+		if (pc) {
+			police = to_police(pc);
+			police->tcf_refcnt++;
+			return police;
+		}
+	}
+	police = kzalloc(sizeof(*police), GFP_KERNEL);
+	if (unlikely(!police))
 		return NULL;
 
-	p->refcnt = 1;
-	spin_lock_init(&p->lock);
-	p->stats_lock = &p->lock;
+	police->tcf_refcnt = 1;
+	spin_lock_init(&police->tcf_lock);
+	police->tcf_stats_lock = &police->tcf_lock;
 	if (parm->rate.rate) {
-		p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
-		if (p->R_tab == NULL)
+		police->tcfp_R_tab =
+			qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
+		if (police->tcfp_R_tab == NULL)
 			goto failure;
 		if (parm->peakrate.rate) {
-			p->P_tab = qdisc_get_rtab(&parm->peakrate,
-						  tb[TCA_POLICE_PEAKRATE-1]);
-			if (p->P_tab == NULL)
+			police->tcfp_P_tab =
+				qdisc_get_rtab(&parm->peakrate,
+					       tb[TCA_POLICE_PEAKRATE-1]);
+			if (police->tcfp_P_tab == NULL)
 				goto failure;
 		}
 	}
 	if (tb[TCA_POLICE_RESULT-1]) {
 		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
 			goto failure;
-		p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
+		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
 	}
 #ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1]) {
 		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
 			goto failure;
-		p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
+		police->tcfp_ewma_rate =
+			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
 	}
 #endif
-	p->toks = p->burst = parm->burst;
-	p->mtu = parm->mtu;
-	if (p->mtu == 0) {
-		p->mtu = ~0;
-		if (p->R_tab)
-			p->mtu = 255<<p->R_tab->rate.cell_log;
+	police->tcfp_toks = police->tcfp_burst = parm->burst;
+	police->tcfp_mtu = parm->mtu;
+	if (police->tcfp_mtu == 0) {
+		police->tcfp_mtu = ~0;
+		if (police->tcfp_R_tab)
+			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
 	}
-	if (p->P_tab)
-		p->ptoks = L2T_P(p, p->mtu);
-	PSCHED_GET_TIME(p->t_c);
-	p->index = parm->index ? : tcf_police_new_index();
-	p->action = parm->action;
+	if (police->tcfp_P_tab)
+		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
+	PSCHED_GET_TIME(police->tcfp_t_c);
+	police->tcf_index = parm->index ? parm->index :
+		tcf_police_new_index();
+	police->tcf_action = parm->action;
 #ifdef CONFIG_NET_ESTIMATOR
 	if (est)
-		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
+		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
+				  police->tcf_stats_lock, est);
 #endif
-	h = tcf_police_hash(p->index);
+	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	write_lock_bh(&police_lock);
-	p->next = tcf_police_ht[h];
-	tcf_police_ht[h] = p;
+	police->tcf_next = tcf_police_ht[h];
+	tcf_police_ht[h] = &police->common;
 	write_unlock_bh(&police_lock);
-	return p;
+	return police;
 
 failure:
-	if (p->R_tab)
-		qdisc_put_rtab(p->R_tab);
-	kfree(p);
+	if (police->tcfp_R_tab)
+		qdisc_put_rtab(police->tcfp_R_tab);
+	kfree(police);
 	return NULL;
 }
 
-int tcf_police(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police(struct sk_buff *skb, struct tcf_police *police)
 {
 	psched_time_t now;
 	long toks;
 	long ptoks = 0;
 
-	spin_lock(&p->lock);
+	spin_lock(&police->tcf_lock);
 
-	p->bstats.bytes += skb->len;
-	p->bstats.packets++;
+	police->tcf_bstats.bytes += skb->len;
+	police->tcf_bstats.packets++;
 
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
-		p->qstats.overlimits++;
-		spin_unlock(&p->lock);
-		return p->action;
+	if (police->tcfp_ewma_rate &&
+	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+		police->tcf_qstats.overlimits++;
+		spin_unlock(&police->tcf_lock);
+		return police->tcf_action;
 	}
 #endif
-
-	if (skb->len <= p->mtu) {
-		if (p->R_tab == NULL) {
-			spin_unlock(&p->lock);
-			return p->result;
+	if (skb->len <= police->tcfp_mtu) {
+		if (police->tcfp_R_tab == NULL) {
+			spin_unlock(&police->tcf_lock);
+			return police->tcfp_result;
 		}
 
 		PSCHED_GET_TIME(now);
-
-		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
-
-		if (p->P_tab) {
-			ptoks = toks + p->ptoks;
-			if (ptoks > (long)L2T_P(p, p->mtu))
-				ptoks = (long)L2T_P(p, p->mtu);
-			ptoks -= L2T_P(p, skb->len);
+		toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
+					 police->tcfp_burst);
+		if (police->tcfp_P_tab) {
+			ptoks = toks + police->tcfp_ptoks;
+			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
+				ptoks = (long)L2T_P(police, police->tcfp_mtu);
+			ptoks -= L2T_P(police, skb->len);
 		}
-		toks += p->toks;
-		if (toks > (long)p->burst)
-			toks = p->burst;
-		toks -= L2T(p, skb->len);
-
+		toks += police->tcfp_toks;
+		if (toks > (long)police->tcfp_burst)
+			toks = police->tcfp_burst;
+		toks -= L2T(police, skb->len);
 		if ((toks|ptoks) >= 0) {
-			p->t_c = now;
-			p->toks = toks;
-			p->ptoks = ptoks;
-			spin_unlock(&p->lock);
-			return p->result;
+			police->tcfp_t_c = now;
+			police->tcfp_toks = toks;
+			police->tcfp_ptoks = ptoks;
+			spin_unlock(&police->tcf_lock);
+			return police->tcfp_result;
 		}
 	}
 
-	p->qstats.overlimits++;
-	spin_unlock(&p->lock);
-	return p->action;
+	police->tcf_qstats.overlimits++;
+	spin_unlock(&police->tcf_lock);
+	return police->tcf_action;
 }
 EXPORT_SYMBOL(tcf_police);
 
-int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
 {
 	unsigned char *b = skb->tail;
 	struct tc_police opt;
 
-	opt.index = p->index;
-	opt.action = p->action;
-	opt.mtu = p->mtu;
-	opt.burst = p->burst;
-	if (p->R_tab)
-		opt.rate = p->R_tab->rate;
+	opt.index = police->tcf_index;
+	opt.action = police->tcf_action;
+	opt.mtu = police->tcfp_mtu;
+	opt.burst = police->tcfp_burst;
+	if (police->tcfp_R_tab)
+		opt.rate = police->tcfp_R_tab->rate;
 	else
 		memset(&opt.rate, 0, sizeof(opt.rate));
-	if (p->P_tab)
-		opt.peakrate = p->P_tab->rate;
+	if (police->tcfp_P_tab)
+		opt.peakrate = police->tcfp_P_tab->rate;
 	else
 		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
 	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (p->result)
-		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
+	if (police->tcfp_result)
+		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
+			&police->tcfp_result);
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate)
-		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
+	if (police->tcfp_ewma_rate)
+		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
 #endif
 	return skb->len;
 
@@ -574,19 +585,20 @@ rtattr_failure:
 	return -1;
 }
 
-int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
 {
 	struct gnet_dump d;
 
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, p->stats_lock, &d) < 0)
+					 TCA_XSTATS, police->tcf_stats_lock,
+					 &d) < 0)
 		goto errout;
 
-	if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
+	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
-	    gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
 #endif
-	    gnet_stats_copy_queue(&d, &p->qstats) < 0)
+	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
 		goto errout;
 
 	if (gnet_stats_finish_copy(&d) < 0)