author     David S. Miller <davem@sunset.davemloft.net>  2006-08-22 02:54:55 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2006-09-22 17:55:10 -0400
commit     e9ce1cd3cf6cf35b21d0ce990f2e738f35907386 (patch)
tree       22a3ee7b78ae7cbf00520c66dcc389d87740069c /net
parent     2e4ca75b31b6851dcc036c2cdebf3ecfe279a653 (diff)
[PKT_SCHED]: Kill pkt_act.h inlining.
This was simply making templates of functions and mostly causing a lot of code duplication in the classifier action modules.

We solve this more cleanly by having a common "struct tcf_common" that the hash worker functions, contained once in act_api.c, can work with.

Callers work with real action objects that have the common struct plus their module-specific struct members. You go from the common object to the higher-level one using a "to_foo()" macro, which makes use of container_of() to do the dirty work.

This also kills off act_generic.h, which was only used by act_simple.c; keeping it around was more work than it was worth.

Signed-off-by: David S. Miller <davem@davemloft.net>
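The pattern described above is the core of the patch: each action embeds a struct tcf_common as its first member, act_api.c only ever manipulates that common part, and the module recovers its own type with container_of(). A minimal sketch of the idea follows; the field names match the gact usage in the diff below, but the exact header layout is an assumption, not an excerpt from the patch.

struct tcf_common {
	struct tcf_common	*tcfc_next;
	u32			tcfc_index;
	int			tcfc_refcnt;
	int			tcfc_bindcnt;
	spinlock_t		tcfc_lock;
	/* the real struct also carries stats, timers and an estimator */
};

struct tcf_gact {
	struct tcf_common	common;		/* must stay first: act_api.c
						 * allocates and frees this */
#ifdef CONFIG_GACT_PROB
	u16			tcfg_ptype;
	u16			tcfg_pval;
	int			tcfg_paction;
#endif
};

/* go from the generic object handed out by act_api.c back to the
 * module-specific one */
#define to_gact(pc)	container_of(pc, struct tcf_gact, common)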
Diffstat (limited to 'net')
-rw-r--r--  net/sched/act_api.c    | 246
-rw-r--r--  net/sched/act_gact.c   | 142
-rw-r--r--  net/sched/act_ipt.c    | 175
-rw-r--r--  net/sched/act_mirred.c | 159
-rw-r--r--  net/sched/act_pedit.c  | 166
-rw-r--r--  net/sched/act_police.c | 508
-rw-r--r--  net/sched/act_simple.c | 183
7 files changed, 932 insertions, 647 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6990747d6d5a..835070e9169c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -33,16 +33,230 @@
33#include <net/sch_generic.h> 33#include <net/sch_generic.h>
34#include <net/act_api.h> 34#include <net/act_api.h>
35 35
36#if 0 /* control */ 36void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
37#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args) 37{
38#else 38 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
39#define DPRINTK(format, args...) 39 struct tcf_common **p1p;
40
41 for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
42 if (*p1p == p) {
43 write_lock_bh(hinfo->lock);
44 *p1p = p->tcfc_next;
45 write_unlock_bh(hinfo->lock);
46#ifdef CONFIG_NET_ESTIMATOR
47 gen_kill_estimator(&p->tcfc_bstats,
48 &p->tcfc_rate_est);
40#endif 49#endif
41#if 0 /* data */ 50 kfree(p);
42#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args) 51 return;
43#else 52 }
44#define D2PRINTK(format, args...) 53 }
54 BUG_TRAP(0);
55}
56EXPORT_SYMBOL(tcf_hash_destroy);
57
58int tcf_hash_release(struct tcf_common *p, int bind,
59 struct tcf_hashinfo *hinfo)
60{
61 int ret = 0;
62
63 if (p) {
64 if (bind)
65 p->tcfc_bindcnt--;
66
67 p->tcfc_refcnt--;
68 if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
69 tcf_hash_destroy(p, hinfo);
70 ret = 1;
71 }
72 }
73 return ret;
74}
75EXPORT_SYMBOL(tcf_hash_release);
76
77static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
78 struct tc_action *a, struct tcf_hashinfo *hinfo)
79{
80 struct tcf_common *p;
81 int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
82 struct rtattr *r ;
83
84 read_lock(hinfo->lock);
85
86 s_i = cb->args[0];
87
88 for (i = 0; i < (hinfo->hmask + 1); i++) {
89 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
90
91 for (; p; p = p->tcfc_next) {
92 index++;
93 if (index < s_i)
94 continue;
95 a->priv = p;
96 a->order = n_i;
97 r = (struct rtattr*) skb->tail;
98 RTA_PUT(skb, a->order, 0, NULL);
99 err = tcf_action_dump_1(skb, a, 0, 0);
100 if (err < 0) {
101 index--;
102 skb_trim(skb, (u8*)r - skb->data);
103 goto done;
104 }
105 r->rta_len = skb->tail - (u8*)r;
106 n_i++;
107 if (n_i >= TCA_ACT_MAX_PRIO)
108 goto done;
109 }
110 }
111done:
112 read_unlock(hinfo->lock);
113 if (n_i)
114 cb->args[0] += n_i;
115 return n_i;
116
117rtattr_failure:
118 skb_trim(skb, (u8*)r - skb->data);
119 goto done;
120}
121
122static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
123 struct tcf_hashinfo *hinfo)
124{
125 struct tcf_common *p, *s_p;
126 struct rtattr *r ;
127 int i= 0, n_i = 0;
128
129 r = (struct rtattr*) skb->tail;
130 RTA_PUT(skb, a->order, 0, NULL);
131 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
132 for (i = 0; i < (hinfo->hmask + 1); i++) {
133 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
134
135 while (p != NULL) {
136 s_p = p->tcfc_next;
137 if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
138 module_put(a->ops->owner);
139 n_i++;
140 p = s_p;
141 }
142 }
143 RTA_PUT(skb, TCA_FCNT, 4, &n_i);
144 r->rta_len = skb->tail - (u8*)r;
145
146 return n_i;
147rtattr_failure:
148 skb_trim(skb, (u8*)r - skb->data);
149 return -EINVAL;
150}
151
152int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
153 int type, struct tc_action *a)
154{
155 struct tcf_hashinfo *hinfo = a->ops->hinfo;
156
157 if (type == RTM_DELACTION) {
158 return tcf_del_walker(skb, a, hinfo);
159 } else if (type == RTM_GETACTION) {
160 return tcf_dump_walker(skb, cb, a, hinfo);
161 } else {
162 printk("tcf_generic_walker: unknown action %d\n", type);
163 return -EINVAL;
164 }
165}
166EXPORT_SYMBOL(tcf_generic_walker);
167
168struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
169{
170 struct tcf_common *p;
171
172 read_lock(hinfo->lock);
173 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
174 p = p->tcfc_next) {
175 if (p->tcfc_index == index)
176 break;
177 }
178 read_unlock(hinfo->lock);
179
180 return p;
181}
182EXPORT_SYMBOL(tcf_hash_lookup);
183
184u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
185{
186 u32 val = *idx_gen;
187
188 do {
189 if (++val == 0)
190 val = 1;
191 } while (tcf_hash_lookup(val, hinfo));
192
193 return (*idx_gen = val);
194}
195EXPORT_SYMBOL(tcf_hash_new_index);
196
197int tcf_hash_search(struct tc_action *a, u32 index)
198{
199 struct tcf_hashinfo *hinfo = a->ops->hinfo;
200 struct tcf_common *p = tcf_hash_lookup(index, hinfo);
201
202 if (p) {
203 a->priv = p;
204 return 1;
205 }
206 return 0;
207}
208EXPORT_SYMBOL(tcf_hash_search);
209
210struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
211 struct tcf_hashinfo *hinfo)
212{
213 struct tcf_common *p = NULL;
214 if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
215 if (bind) {
216 p->tcfc_bindcnt++;
217 p->tcfc_refcnt++;
218 }
219 a->priv = p;
220 }
221 return p;
222}
223EXPORT_SYMBOL(tcf_hash_check);
224
225struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo)
226{
227 struct tcf_common *p = kzalloc(size, GFP_KERNEL);
228
229 if (unlikely(!p))
230 return p;
231 p->tcfc_refcnt = 1;
232 if (bind)
233 p->tcfc_bindcnt = 1;
234
235 spin_lock_init(&p->tcfc_lock);
236 p->tcfc_stats_lock = &p->tcfc_lock;
237 p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
238 p->tcfc_tm.install = jiffies;
239 p->tcfc_tm.lastuse = jiffies;
240#ifdef CONFIG_NET_ESTIMATOR
241 if (est)
242 gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
243 p->tcfc_stats_lock, est);
45#endif 244#endif
245 a->priv = (void *) p;
246 return p;
247}
248EXPORT_SYMBOL(tcf_hash_create);
249
250void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
251{
252 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
253
254 write_lock_bh(hinfo->lock);
255 p->tcfc_next = hinfo->htab[h];
256 hinfo->htab[h] = p;
257 write_unlock_bh(hinfo->lock);
258}
259EXPORT_SYMBOL(tcf_hash_insert);
46 260
47static struct tc_action_ops *act_base = NULL; 261static struct tc_action_ops *act_base = NULL;
48static DEFINE_RWLOCK(act_mod_lock); 262static DEFINE_RWLOCK(act_mod_lock);
@@ -155,9 +369,6 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
155 369
156 if (skb->tc_verd & TC_NCLS) { 370 if (skb->tc_verd & TC_NCLS) {
157 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 371 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
158 D2PRINTK("(%p)tcf_action_exec: cleared TC_NCLS in %s out %s\n",
159 skb, skb->input_dev ? skb->input_dev->name : "xxx",
160 skb->dev->name);
161 ret = TC_ACT_OK; 372 ret = TC_ACT_OK;
162 goto exec_done; 373 goto exec_done;
163 } 374 }
@@ -187,8 +398,6 @@ void tcf_action_destroy(struct tc_action *act, int bind)
187 398
188 for (a = act; a; a = act) { 399 for (a = act; a; a = act) {
189 if (a->ops && a->ops->cleanup) { 400 if (a->ops && a->ops->cleanup) {
190 DPRINTK("tcf_action_destroy destroying %p next %p\n",
191 a, a->next);
192 if (a->ops->cleanup(a, bind) == ACT_P_DELETED) 401 if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
193 module_put(a->ops->owner); 402 module_put(a->ops->owner);
194 act = act->next; 403 act = act->next;
@@ -331,7 +540,6 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
331 if (*err != ACT_P_CREATED) 540 if (*err != ACT_P_CREATED)
332 module_put(a_o->owner); 541 module_put(a_o->owner);
333 a->ops = a_o; 542 a->ops = a_o;
334 DPRINTK("tcf_action_init_1: successfull %s\n", act_name);
335 543
336 *err = 0; 544 *err = 0;
337 return a; 545 return a;
@@ -392,12 +600,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
392 if (compat_mode) { 600 if (compat_mode) {
393 if (a->type == TCA_OLD_COMPAT) 601 if (a->type == TCA_OLD_COMPAT)
394 err = gnet_stats_start_copy_compat(skb, 0, 602 err = gnet_stats_start_copy_compat(skb, 0,
395 TCA_STATS, TCA_XSTATS, h->stats_lock, &d); 603 TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d);
396 else 604 else
397 return 0; 605 return 0;
398 } else 606 } else
399 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 607 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
400 h->stats_lock, &d); 608 h->tcf_stats_lock, &d);
401 609
402 if (err < 0) 610 if (err < 0)
403 goto errout; 611 goto errout;
@@ -406,11 +614,11 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
406 if (a->ops->get_stats(skb, a) < 0) 614 if (a->ops->get_stats(skb, a) < 0)
407 goto errout; 615 goto errout;
408 616
409 if (gnet_stats_copy_basic(&d, &h->bstats) < 0 || 617 if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
410#ifdef CONFIG_NET_ESTIMATOR 618#ifdef CONFIG_NET_ESTIMATOR
411 gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 || 619 gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
412#endif 620#endif
413 gnet_stats_copy_queue(&d, &h->qstats) < 0) 621 gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
414 goto errout; 622 goto errout;
415 623
416 if (gnet_stats_finish_copy(&d) < 0) 624 if (gnet_stats_finish_copy(&d) < 0)
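Taken together, the helpers added to act_api.c above replace the per-module templates that pkt_act.h used to generate. A condensed sketch of how a converted module is expected to wire them up (modeled on the act_gact.c changes that follow; the "foo" names are placeholders, not symbols from the patch):

#define FOO_TAB_MASK 15
static struct tcf_common *tcf_foo_ht[FOO_TAB_MASK + 1];
static u32 foo_idx_gen;
static DEFINE_RWLOCK(foo_lock);

static struct tcf_hashinfo foo_hash_info = {
	.htab	= tcf_foo_ht,
	.hmask	= FOO_TAB_MASK,
	.lock	= &foo_lock,
};

static int tcf_foo_init(struct rtattr *est, struct tc_action *a,
			u32 index, int bind)
{
	struct tcf_common *pc;
	struct tcf_foo *f;
	int ret = 0;

	pc = tcf_hash_check(index, a, bind, &foo_hash_info);
	if (!pc) {
		/* not found: allocate common + module-specific state */
		pc = tcf_hash_create(index, est, a, sizeof(*f), bind,
				     &foo_idx_gen, &foo_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		ret = ACT_P_CREATED;
	}
	f = to_foo(pc);		/* container_of() back to struct tcf_foo */
	/* ... set module-specific fields under f->tcf_lock ... */
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &foo_hash_info);
	return ret;
}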
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e75a147ad60f..6cff56696a81 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -34,48 +34,43 @@
34#include <linux/tc_act/tc_gact.h> 34#include <linux/tc_act/tc_gact.h>
35#include <net/tc_act/tc_gact.h> 35#include <net/tc_act/tc_gact.h>
36 36
37/* use generic hash table */ 37#define GACT_TAB_MASK 15
38#define MY_TAB_SIZE 16 38static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1];
39#define MY_TAB_MASK 15 39static u32 gact_idx_gen;
40
41static u32 idx_gen;
42static struct tcf_gact *tcf_gact_ht[MY_TAB_SIZE];
43static DEFINE_RWLOCK(gact_lock); 40static DEFINE_RWLOCK(gact_lock);
44 41
45/* ovewrride the defaults */ 42static struct tcf_hashinfo gact_hash_info = {
46#define tcf_st tcf_gact 43 .htab = tcf_gact_ht,
47#define tc_st tc_gact 44 .hmask = GACT_TAB_MASK,
48#define tcf_t_lock gact_lock 45 .lock = &gact_lock,
49#define tcf_ht tcf_gact_ht 46};
50
51#define CONFIG_NET_ACT_INIT 1
52#include <net/pkt_act.h>
53 47
54#ifdef CONFIG_GACT_PROB 48#ifdef CONFIG_GACT_PROB
55static int gact_net_rand(struct tcf_gact *p) 49static int gact_net_rand(struct tcf_gact *gact)
56{ 50{
57 if (net_random()%p->pval) 51 if (net_random() % gact->tcfg_pval)
58 return p->action; 52 return gact->tcf_action;
59 return p->paction; 53 return gact->tcfg_paction;
60} 54}
61 55
62static int gact_determ(struct tcf_gact *p) 56static int gact_determ(struct tcf_gact *gact)
63{ 57{
64 if (p->bstats.packets%p->pval) 58 if (gact->tcf_bstats.packets % gact->tcfg_pval)
65 return p->action; 59 return gact->tcf_action;
66 return p->paction; 60 return gact->tcfg_paction;
67} 61}
68 62
69typedef int (*g_rand)(struct tcf_gact *p); 63typedef int (*g_rand)(struct tcf_gact *gact);
70static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; 64static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
71#endif 65#endif /* CONFIG_GACT_PROB */
72 66
73static int tcf_gact_init(struct rtattr *rta, struct rtattr *est, 67static int tcf_gact_init(struct rtattr *rta, struct rtattr *est,
74 struct tc_action *a, int ovr, int bind) 68 struct tc_action *a, int ovr, int bind)
75{ 69{
76 struct rtattr *tb[TCA_GACT_MAX]; 70 struct rtattr *tb[TCA_GACT_MAX];
77 struct tc_gact *parm; 71 struct tc_gact *parm;
78 struct tcf_gact *p; 72 struct tcf_gact *gact;
73 struct tcf_common *pc;
79 int ret = 0; 74 int ret = 0;
80 75
81 if (rta == NULL || rtattr_parse_nested(tb, TCA_GACT_MAX, rta) < 0) 76 if (rta == NULL || rtattr_parse_nested(tb, TCA_GACT_MAX, rta) < 0)
@@ -94,105 +89,106 @@ static int tcf_gact_init(struct rtattr *rta, struct rtattr *est,
94 return -EOPNOTSUPP; 89 return -EOPNOTSUPP;
95#endif 90#endif
96 91
97 p = tcf_hash_check(parm->index, a, ovr, bind); 92 pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
98 if (p == NULL) { 93 if (!pc) {
99 p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 94 pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
100 if (p == NULL) 95 bind, &gact_idx_gen, &gact_hash_info);
96 if (unlikely(!pc))
101 return -ENOMEM; 97 return -ENOMEM;
102 ret = ACT_P_CREATED; 98 ret = ACT_P_CREATED;
103 } else { 99 } else {
104 if (!ovr) { 100 if (!ovr) {
105 tcf_hash_release(p, bind); 101 tcf_hash_release(pc, bind, &gact_hash_info);
106 return -EEXIST; 102 return -EEXIST;
107 } 103 }
108 } 104 }
109 105
110 spin_lock_bh(&p->lock); 106 gact = to_gact(pc);
111 p->action = parm->action; 107
108 spin_lock_bh(&gact->tcf_lock);
109 gact->tcf_action = parm->action;
112#ifdef CONFIG_GACT_PROB 110#ifdef CONFIG_GACT_PROB
113 if (tb[TCA_GACT_PROB-1] != NULL) { 111 if (tb[TCA_GACT_PROB-1] != NULL) {
114 struct tc_gact_p *p_parm = RTA_DATA(tb[TCA_GACT_PROB-1]); 112 struct tc_gact_p *p_parm = RTA_DATA(tb[TCA_GACT_PROB-1]);
115 p->paction = p_parm->paction; 113 gact->tcfg_paction = p_parm->paction;
116 p->pval = p_parm->pval; 114 gact->tcfg_pval = p_parm->pval;
117 p->ptype = p_parm->ptype; 115 gact->tcfg_ptype = p_parm->ptype;
118 } 116 }
119#endif 117#endif
120 spin_unlock_bh(&p->lock); 118 spin_unlock_bh(&gact->tcf_lock);
121 if (ret == ACT_P_CREATED) 119 if (ret == ACT_P_CREATED)
122 tcf_hash_insert(p); 120 tcf_hash_insert(pc, &gact_hash_info);
123 return ret; 121 return ret;
124} 122}
125 123
126static int 124static int tcf_gact_cleanup(struct tc_action *a, int bind)
127tcf_gact_cleanup(struct tc_action *a, int bind)
128{ 125{
129 struct tcf_gact *p = PRIV(a, gact); 126 struct tcf_gact *gact = a->priv;
130 127
131 if (p != NULL) 128 if (gact)
132 return tcf_hash_release(p, bind); 129 return tcf_hash_release(&gact->common, bind, &gact_hash_info);
133 return 0; 130 return 0;
134} 131}
135 132
136static int 133static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
137tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
138{ 134{
139 struct tcf_gact *p = PRIV(a, gact); 135 struct tcf_gact *gact = a->priv;
140 int action = TC_ACT_SHOT; 136 int action = TC_ACT_SHOT;
141 137
142 spin_lock(&p->lock); 138 spin_lock(&gact->tcf_lock);
143#ifdef CONFIG_GACT_PROB 139#ifdef CONFIG_GACT_PROB
144 if (p->ptype && gact_rand[p->ptype] != NULL) 140 if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
145 action = gact_rand[p->ptype](p); 141 action = gact_rand[gact->tcfg_ptype](gact);
146 else 142 else
147 action = p->action; 143 action = gact->tcf_action;
148#else 144#else
149 action = p->action; 145 action = gact->tcf_action;
150#endif 146#endif
151 p->bstats.bytes += skb->len; 147 gact->tcf_bstats.bytes += skb->len;
152 p->bstats.packets++; 148 gact->tcf_bstats.packets++;
153 if (action == TC_ACT_SHOT) 149 if (action == TC_ACT_SHOT)
154 p->qstats.drops++; 150 gact->tcf_qstats.drops++;
155 p->tm.lastuse = jiffies; 151 gact->tcf_tm.lastuse = jiffies;
156 spin_unlock(&p->lock); 152 spin_unlock(&gact->tcf_lock);
157 153
158 return action; 154 return action;
159} 155}
160 156
161static int 157static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
162tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
163{ 158{
164 unsigned char *b = skb->tail; 159 unsigned char *b = skb->tail;
165 struct tc_gact opt; 160 struct tc_gact opt;
166 struct tcf_gact *p = PRIV(a, gact); 161 struct tcf_gact *gact = a->priv;
167 struct tcf_t t; 162 struct tcf_t t;
168 163
169 opt.index = p->index; 164 opt.index = gact->tcf_index;
170 opt.refcnt = p->refcnt - ref; 165 opt.refcnt = gact->tcf_refcnt - ref;
171 opt.bindcnt = p->bindcnt - bind; 166 opt.bindcnt = gact->tcf_bindcnt - bind;
172 opt.action = p->action; 167 opt.action = gact->tcf_action;
173 RTA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); 168 RTA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
174#ifdef CONFIG_GACT_PROB 169#ifdef CONFIG_GACT_PROB
175 if (p->ptype) { 170 if (gact->tcfg_ptype) {
176 struct tc_gact_p p_opt; 171 struct tc_gact_p p_opt;
177 p_opt.paction = p->paction; 172 p_opt.paction = gact->tcfg_paction;
178 p_opt.pval = p->pval; 173 p_opt.pval = gact->tcfg_pval;
179 p_opt.ptype = p->ptype; 174 p_opt.ptype = gact->tcfg_ptype;
180 RTA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); 175 RTA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
181 } 176 }
182#endif 177#endif
183 t.install = jiffies_to_clock_t(jiffies - p->tm.install); 178 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
184 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 179 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
185 t.expires = jiffies_to_clock_t(p->tm.expires); 180 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
186 RTA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); 181 RTA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
187 return skb->len; 182 return skb->len;
188 183
189 rtattr_failure: 184rtattr_failure:
190 skb_trim(skb, b - skb->data); 185 skb_trim(skb, b - skb->data);
191 return -1; 186 return -1;
192} 187}
193 188
194static struct tc_action_ops act_gact_ops = { 189static struct tc_action_ops act_gact_ops = {
195 .kind = "gact", 190 .kind = "gact",
191 .hinfo = &gact_hash_info,
196 .type = TCA_ACT_GACT, 192 .type = TCA_ACT_GACT,
197 .capab = TCA_CAP_NONE, 193 .capab = TCA_CAP_NONE,
198 .owner = THIS_MODULE, 194 .owner = THIS_MODULE,
@@ -208,8 +204,7 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
208MODULE_DESCRIPTION("Generic Classifier actions"); 204MODULE_DESCRIPTION("Generic Classifier actions");
209MODULE_LICENSE("GPL"); 205MODULE_LICENSE("GPL");
210 206
211static int __init 207static int __init gact_init_module(void)
212gact_init_module(void)
213{ 208{
214#ifdef CONFIG_GACT_PROB 209#ifdef CONFIG_GACT_PROB
215 printk("GACT probability on\n"); 210 printk("GACT probability on\n");
@@ -219,8 +214,7 @@ gact_init_module(void)
219 return tcf_register_action(&act_gact_ops); 214 return tcf_register_action(&act_gact_ops);
220} 215}
221 216
222static void __exit 217static void __exit gact_cleanup_module(void)
223gact_cleanup_module(void)
224{ 218{
225 tcf_unregister_action(&act_gact_ops); 219 tcf_unregister_action(&act_gact_ops);
226} 220}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d799e01248c4..224c078a398e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -38,25 +38,19 @@
38 38
39#include <linux/netfilter_ipv4/ip_tables.h> 39#include <linux/netfilter_ipv4/ip_tables.h>
40 40
41/* use generic hash table */
42#define MY_TAB_SIZE 16
43#define MY_TAB_MASK 15
44 41
45static u32 idx_gen; 42#define IPT_TAB_MASK 15
46static struct tcf_ipt *tcf_ipt_ht[MY_TAB_SIZE]; 43static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
47/* ipt hash table lock */ 44static u32 ipt_idx_gen;
48static DEFINE_RWLOCK(ipt_lock); 45static DEFINE_RWLOCK(ipt_lock);
49 46
50/* ovewrride the defaults */ 47static struct tcf_hashinfo ipt_hash_info = {
51#define tcf_st tcf_ipt 48 .htab = tcf_ipt_ht,
52#define tcf_t_lock ipt_lock 49 .hmask = IPT_TAB_MASK,
53#define tcf_ht tcf_ipt_ht 50 .lock = &ipt_lock,
54 51};
55#define CONFIG_NET_ACT_INIT
56#include <net/pkt_act.h>
57 52
58static int 53static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
59ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
60{ 54{
61 struct ipt_target *target; 55 struct ipt_target *target;
62 int ret = 0; 56 int ret = 0;
@@ -65,7 +59,6 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
65 if (!target) 59 if (!target)
66 return -ENOENT; 60 return -ENOENT;
67 61
68 DPRINTK("ipt_init_target: found %s\n", target->name);
69 t->u.kernel.target = target; 62 t->u.kernel.target = target;
70 63
71 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), 64 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
@@ -78,8 +71,6 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
78 t->u.kernel.target, t->data, 71 t->u.kernel.target, t->data,
79 t->u.target_size - sizeof(*t), 72 t->u.target_size - sizeof(*t),
80 hook)) { 73 hook)) {
81 DPRINTK("ipt_init_target: check failed for `%s'.\n",
82 t->u.kernel.target->name);
83 module_put(t->u.kernel.target->me); 74 module_put(t->u.kernel.target->me);
84 ret = -EINVAL; 75 ret = -EINVAL;
85 } 76 }
@@ -87,8 +78,7 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
87 return ret; 78 return ret;
88} 79}
89 80
90static void 81static void ipt_destroy_target(struct ipt_entry_target *t)
91ipt_destroy_target(struct ipt_entry_target *t)
92{ 82{
93 if (t->u.kernel.target->destroy) 83 if (t->u.kernel.target->destroy)
94 t->u.kernel.target->destroy(t->u.kernel.target, t->data, 84 t->u.kernel.target->destroy(t->u.kernel.target, t->data,
@@ -96,31 +86,30 @@ ipt_destroy_target(struct ipt_entry_target *t)
96 module_put(t->u.kernel.target->me); 86 module_put(t->u.kernel.target->me);
97} 87}
98 88
99static int 89static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
100tcf_ipt_release(struct tcf_ipt *p, int bind)
101{ 90{
102 int ret = 0; 91 int ret = 0;
103 if (p) { 92 if (ipt) {
104 if (bind) 93 if (bind)
105 p->bindcnt--; 94 ipt->tcf_bindcnt--;
106 p->refcnt--; 95 ipt->tcf_refcnt--;
107 if (p->bindcnt <= 0 && p->refcnt <= 0) { 96 if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
108 ipt_destroy_target(p->t); 97 ipt_destroy_target(ipt->tcfi_t);
109 kfree(p->tname); 98 kfree(ipt->tcfi_tname);
110 kfree(p->t); 99 kfree(ipt->tcfi_t);
111 tcf_hash_destroy(p); 100 tcf_hash_destroy(&ipt->common, &ipt_hash_info);
112 ret = ACT_P_DELETED; 101 ret = ACT_P_DELETED;
113 } 102 }
114 } 103 }
115 return ret; 104 return ret;
116} 105}
117 106
118static int 107static int tcf_ipt_init(struct rtattr *rta, struct rtattr *est,
119tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 108 struct tc_action *a, int ovr, int bind)
120 int ovr, int bind)
121{ 109{
122 struct rtattr *tb[TCA_IPT_MAX]; 110 struct rtattr *tb[TCA_IPT_MAX];
123 struct tcf_ipt *p; 111 struct tcf_ipt *ipt;
112 struct tcf_common *pc;
124 struct ipt_entry_target *td, *t; 113 struct ipt_entry_target *td, *t;
125 char *tname; 114 char *tname;
126 int ret = 0, err; 115 int ret = 0, err;
@@ -144,49 +133,51 @@ tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
144 RTA_PAYLOAD(tb[TCA_IPT_INDEX-1]) >= sizeof(u32)) 133 RTA_PAYLOAD(tb[TCA_IPT_INDEX-1]) >= sizeof(u32))
145 index = *(u32 *)RTA_DATA(tb[TCA_IPT_INDEX-1]); 134 index = *(u32 *)RTA_DATA(tb[TCA_IPT_INDEX-1]);
146 135
147 p = tcf_hash_check(index, a, ovr, bind); 136 pc = tcf_hash_check(index, a, bind, &ipt_hash_info);
148 if (p == NULL) { 137 if (!pc) {
149 p = tcf_hash_create(index, est, a, sizeof(*p), ovr, bind); 138 pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
150 if (p == NULL) 139 &ipt_idx_gen, &ipt_hash_info);
140 if (unlikely(!pc))
151 return -ENOMEM; 141 return -ENOMEM;
152 ret = ACT_P_CREATED; 142 ret = ACT_P_CREATED;
153 } else { 143 } else {
154 if (!ovr) { 144 if (!ovr) {
155 tcf_ipt_release(p, bind); 145 tcf_ipt_release(to_ipt(pc), bind);
156 return -EEXIST; 146 return -EEXIST;
157 } 147 }
158 } 148 }
149 ipt = to_ipt(pc);
159 150
160 hook = *(u32 *)RTA_DATA(tb[TCA_IPT_HOOK-1]); 151 hook = *(u32 *)RTA_DATA(tb[TCA_IPT_HOOK-1]);
161 152
162 err = -ENOMEM; 153 err = -ENOMEM;
163 tname = kmalloc(IFNAMSIZ, GFP_KERNEL); 154 tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
164 if (tname == NULL) 155 if (unlikely(!tname))
165 goto err1; 156 goto err1;
166 if (tb[TCA_IPT_TABLE - 1] == NULL || 157 if (tb[TCA_IPT_TABLE - 1] == NULL ||
167 rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ) 158 rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ)
168 strcpy(tname, "mangle"); 159 strcpy(tname, "mangle");
169 160
170 t = kmalloc(td->u.target_size, GFP_KERNEL); 161 t = kmalloc(td->u.target_size, GFP_KERNEL);
171 if (t == NULL) 162 if (unlikely(!t))
172 goto err2; 163 goto err2;
173 memcpy(t, td, td->u.target_size); 164 memcpy(t, td, td->u.target_size);
174 165
175 if ((err = ipt_init_target(t, tname, hook)) < 0) 166 if ((err = ipt_init_target(t, tname, hook)) < 0)
176 goto err3; 167 goto err3;
177 168
178 spin_lock_bh(&p->lock); 169 spin_lock_bh(&ipt->tcf_lock);
179 if (ret != ACT_P_CREATED) { 170 if (ret != ACT_P_CREATED) {
180 ipt_destroy_target(p->t); 171 ipt_destroy_target(ipt->tcfi_t);
181 kfree(p->tname); 172 kfree(ipt->tcfi_tname);
182 kfree(p->t); 173 kfree(ipt->tcfi_t);
183 } 174 }
184 p->tname = tname; 175 ipt->tcfi_tname = tname;
185 p->t = t; 176 ipt->tcfi_t = t;
186 p->hook = hook; 177 ipt->tcfi_hook = hook;
187 spin_unlock_bh(&p->lock); 178 spin_unlock_bh(&ipt->tcf_lock);
188 if (ret == ACT_P_CREATED) 179 if (ret == ACT_P_CREATED)
189 tcf_hash_insert(p); 180 tcf_hash_insert(pc, &ipt_hash_info);
190 return ret; 181 return ret;
191 182
192err3: 183err3:
@@ -194,33 +185,32 @@ err3:
194err2: 185err2:
195 kfree(tname); 186 kfree(tname);
196err1: 187err1:
197 kfree(p); 188 kfree(pc);
198 return err; 189 return err;
199} 190}
200 191
201static int 192static int tcf_ipt_cleanup(struct tc_action *a, int bind)
202tcf_ipt_cleanup(struct tc_action *a, int bind)
203{ 193{
204 struct tcf_ipt *p = PRIV(a, ipt); 194 struct tcf_ipt *ipt = a->priv;
205 return tcf_ipt_release(p, bind); 195 return tcf_ipt_release(ipt, bind);
206} 196}
207 197
208static int 198static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
209tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 199 struct tcf_result *res)
210{ 200{
211 int ret = 0, result = 0; 201 int ret = 0, result = 0;
212 struct tcf_ipt *p = PRIV(a, ipt); 202 struct tcf_ipt *ipt = a->priv;
213 203
214 if (skb_cloned(skb)) { 204 if (skb_cloned(skb)) {
215 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 205 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
216 return TC_ACT_UNSPEC; 206 return TC_ACT_UNSPEC;
217 } 207 }
218 208
219 spin_lock(&p->lock); 209 spin_lock(&ipt->tcf_lock);
220 210
221 p->tm.lastuse = jiffies; 211 ipt->tcf_tm.lastuse = jiffies;
222 p->bstats.bytes += skb->len; 212 ipt->tcf_bstats.bytes += skb->len;
223 p->bstats.packets++; 213 ipt->tcf_bstats.packets++;
224 214
225 /* yes, we have to worry about both in and out dev 215 /* yes, we have to worry about both in and out dev
226 worry later - danger - this API seems to have changed 216 worry later - danger - this API seems to have changed
@@ -229,16 +219,17 @@ tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
229 /* iptables targets take a double skb pointer in case the skb 219 /* iptables targets take a double skb pointer in case the skb
230 * needs to be replaced. We don't own the skb, so this must not 220 * needs to be replaced. We don't own the skb, so this must not
231 * happen. The pskb_expand_head above should make sure of this */ 221 * happen. The pskb_expand_head above should make sure of this */
232 ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, p->hook, 222 ret = ipt->tcfi_t->u.kernel.target->target(&skb, skb->dev, NULL,
233 p->t->u.kernel.target, p->t->data, 223 ipt->tcfi_hook,
234 NULL); 224 ipt->tcfi_t->u.kernel.target,
225 ipt->tcfi_t->data, NULL);
235 switch (ret) { 226 switch (ret) {
236 case NF_ACCEPT: 227 case NF_ACCEPT:
237 result = TC_ACT_OK; 228 result = TC_ACT_OK;
238 break; 229 break;
239 case NF_DROP: 230 case NF_DROP:
240 result = TC_ACT_SHOT; 231 result = TC_ACT_SHOT;
241 p->qstats.drops++; 232 ipt->tcf_qstats.drops++;
242 break; 233 break;
243 case IPT_CONTINUE: 234 case IPT_CONTINUE:
244 result = TC_ACT_PIPE; 235 result = TC_ACT_PIPE;
@@ -249,53 +240,46 @@ tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
249 result = TC_POLICE_OK; 240 result = TC_POLICE_OK;
250 break; 241 break;
251 } 242 }
252 spin_unlock(&p->lock); 243 spin_unlock(&ipt->tcf_lock);
253 return result; 244 return result;
254 245
255} 246}
256 247
257static int 248static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
258tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
259{ 249{
250 unsigned char *b = skb->tail;
251 struct tcf_ipt *ipt = a->priv;
260 struct ipt_entry_target *t; 252 struct ipt_entry_target *t;
261 struct tcf_t tm; 253 struct tcf_t tm;
262 struct tc_cnt c; 254 struct tc_cnt c;
263 unsigned char *b = skb->tail;
264 struct tcf_ipt *p = PRIV(a, ipt);
265 255
266 /* for simple targets kernel size == user size 256 /* for simple targets kernel size == user size
267 ** user name = target name 257 ** user name = target name
268 ** for foolproof you need to not assume this 258 ** for foolproof you need to not assume this
269 */ 259 */
270 260
271 t = kmalloc(p->t->u.user.target_size, GFP_ATOMIC); 261 t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
272 if (t == NULL) 262 if (unlikely(!t))
273 goto rtattr_failure; 263 goto rtattr_failure;
274 264
275 c.bindcnt = p->bindcnt - bind; 265 c.bindcnt = ipt->tcf_bindcnt - bind;
276 c.refcnt = p->refcnt - ref; 266 c.refcnt = ipt->tcf_refcnt - ref;
277 memcpy(t, p->t, p->t->u.user.target_size); 267 memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size);
278 strcpy(t->u.user.name, p->t->u.kernel.target->name); 268 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
279 269
280 DPRINTK("\ttcf_ipt_dump tablename %s length %d\n", p->tname, 270 RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
281 strlen(p->tname)); 271 RTA_PUT(skb, TCA_IPT_INDEX, 4, &ipt->tcf_index);
282 DPRINTK("\tdump target name %s size %d size user %d " 272 RTA_PUT(skb, TCA_IPT_HOOK, 4, &ipt->tcfi_hook);
283 "data[0] %x data[1] %x\n", p->t->u.kernel.target->name,
284 p->t->u.target_size, p->t->u.user.target_size,
285 p->t->data[0], p->t->data[1]);
286 RTA_PUT(skb, TCA_IPT_TARG, p->t->u.user.target_size, t);
287 RTA_PUT(skb, TCA_IPT_INDEX, 4, &p->index);
288 RTA_PUT(skb, TCA_IPT_HOOK, 4, &p->hook);
289 RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); 273 RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
290 RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, p->tname); 274 RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, ipt->tcfi_tname);
291 tm.install = jiffies_to_clock_t(jiffies - p->tm.install); 275 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
292 tm.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 276 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
293 tm.expires = jiffies_to_clock_t(p->tm.expires); 277 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
294 RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); 278 RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
295 kfree(t); 279 kfree(t);
296 return skb->len; 280 return skb->len;
297 281
298 rtattr_failure: 282rtattr_failure:
299 skb_trim(skb, b - skb->data); 283 skb_trim(skb, b - skb->data);
300 kfree(t); 284 kfree(t);
301 return -1; 285 return -1;
@@ -303,6 +287,7 @@ tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
303 287
304static struct tc_action_ops act_ipt_ops = { 288static struct tc_action_ops act_ipt_ops = {
305 .kind = "ipt", 289 .kind = "ipt",
290 .hinfo = &ipt_hash_info,
306 .type = TCA_ACT_IPT, 291 .type = TCA_ACT_IPT,
307 .capab = TCA_CAP_NONE, 292 .capab = TCA_CAP_NONE,
308 .owner = THIS_MODULE, 293 .owner = THIS_MODULE,
@@ -318,14 +303,12 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
318MODULE_DESCRIPTION("Iptables target actions"); 303MODULE_DESCRIPTION("Iptables target actions");
319MODULE_LICENSE("GPL"); 304MODULE_LICENSE("GPL");
320 305
321static int __init 306static int __init ipt_init_module(void)
322ipt_init_module(void)
323{ 307{
324 return tcf_register_action(&act_ipt_ops); 308 return tcf_register_action(&act_ipt_ops);
325} 309}
326 310
327static void __exit 311static void __exit ipt_cleanup_module(void)
328ipt_cleanup_module(void)
329{ 312{
330 tcf_unregister_action(&act_ipt_ops); 313 tcf_unregister_action(&act_ipt_ops);
331} 314}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fc562047ecc5..483897271f15 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -39,46 +39,39 @@
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <linux/if_arp.h> 40#include <linux/if_arp.h>
41 41
42 42#define MIRRED_TAB_MASK 7
43/* use generic hash table */ 43static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
44#define MY_TAB_SIZE 8 44static u32 mirred_idx_gen;
45#define MY_TAB_MASK (MY_TAB_SIZE - 1)
46static u32 idx_gen;
47static struct tcf_mirred *tcf_mirred_ht[MY_TAB_SIZE];
48static DEFINE_RWLOCK(mirred_lock); 45static DEFINE_RWLOCK(mirred_lock);
49 46
50/* ovewrride the defaults */ 47static struct tcf_hashinfo mirred_hash_info = {
51#define tcf_st tcf_mirred 48 .htab = tcf_mirred_ht,
52#define tc_st tc_mirred 49 .hmask = MIRRED_TAB_MASK,
53#define tcf_t_lock mirred_lock 50 .lock = &mirred_lock,
54#define tcf_ht tcf_mirred_ht 51};
55
56#define CONFIG_NET_ACT_INIT 1
57#include <net/pkt_act.h>
58 52
59static inline int 53static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
60tcf_mirred_release(struct tcf_mirred *p, int bind)
61{ 54{
62 if (p) { 55 if (m) {
63 if (bind) 56 if (bind)
64 p->bindcnt--; 57 m->tcf_bindcnt--;
65 p->refcnt--; 58 m->tcf_refcnt--;
66 if(!p->bindcnt && p->refcnt <= 0) { 59 if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
67 dev_put(p->dev); 60 dev_put(m->tcfm_dev);
68 tcf_hash_destroy(p); 61 tcf_hash_destroy(&m->common, &mirred_hash_info);
69 return 1; 62 return 1;
70 } 63 }
71 } 64 }
72 return 0; 65 return 0;
73} 66}
74 67
75static int 68static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est,
76tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 69 struct tc_action *a, int ovr, int bind)
77 int ovr, int bind)
78{ 70{
79 struct rtattr *tb[TCA_MIRRED_MAX]; 71 struct rtattr *tb[TCA_MIRRED_MAX];
80 struct tc_mirred *parm; 72 struct tc_mirred *parm;
81 struct tcf_mirred *p; 73 struct tcf_mirred *m;
74 struct tcf_common *pc;
82 struct net_device *dev = NULL; 75 struct net_device *dev = NULL;
83 int ret = 0; 76 int ret = 0;
84 int ok_push = 0; 77 int ok_push = 0;
@@ -110,64 +103,62 @@ tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
110 } 103 }
111 } 104 }
112 105
113 p = tcf_hash_check(parm->index, a, ovr, bind); 106 pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
114 if (p == NULL) { 107 if (!pc) {
115 if (!parm->ifindex) 108 if (!parm->ifindex)
116 return -EINVAL; 109 return -EINVAL;
117 p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 110 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
118 if (p == NULL) 111 &mirred_idx_gen, &mirred_hash_info);
112 if (unlikely(!pc))
119 return -ENOMEM; 113 return -ENOMEM;
120 ret = ACT_P_CREATED; 114 ret = ACT_P_CREATED;
121 } else { 115 } else {
122 if (!ovr) { 116 if (!ovr) {
123 tcf_mirred_release(p, bind); 117 tcf_mirred_release(to_mirred(pc), bind);
124 return -EEXIST; 118 return -EEXIST;
125 } 119 }
126 } 120 }
121 m = to_mirred(pc);
127 122
128 spin_lock_bh(&p->lock); 123 spin_lock_bh(&m->tcf_lock);
129 p->action = parm->action; 124 m->tcf_action = parm->action;
130 p->eaction = parm->eaction; 125 m->tcfm_eaction = parm->eaction;
131 if (parm->ifindex) { 126 if (parm->ifindex) {
132 p->ifindex = parm->ifindex; 127 m->tcfm_ifindex = parm->ifindex;
133 if (ret != ACT_P_CREATED) 128 if (ret != ACT_P_CREATED)
134 dev_put(p->dev); 129 dev_put(m->tcfm_dev);
135 p->dev = dev; 130 m->tcfm_dev = dev;
136 dev_hold(dev); 131 dev_hold(dev);
137 p->ok_push = ok_push; 132 m->tcfm_ok_push = ok_push;
138 } 133 }
139 spin_unlock_bh(&p->lock); 134 spin_unlock_bh(&m->tcf_lock);
140 if (ret == ACT_P_CREATED) 135 if (ret == ACT_P_CREATED)
141 tcf_hash_insert(p); 136 tcf_hash_insert(pc, &mirred_hash_info);
142 137
143 DPRINTK("tcf_mirred_init index %d action %d eaction %d device %s "
144 "ifindex %d\n", parm->index, parm->action, parm->eaction,
145 dev->name, parm->ifindex);
146 return ret; 138 return ret;
147} 139}
148 140
149static int 141static int tcf_mirred_cleanup(struct tc_action *a, int bind)
150tcf_mirred_cleanup(struct tc_action *a, int bind)
151{ 142{
152 struct tcf_mirred *p = PRIV(a, mirred); 143 struct tcf_mirred *m = a->priv;
153 144
154 if (p != NULL) 145 if (m)
155 return tcf_mirred_release(p, bind); 146 return tcf_mirred_release(m, bind);
156 return 0; 147 return 0;
157} 148}
158 149
159static int 150static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
160tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 151 struct tcf_result *res)
161{ 152{
162 struct tcf_mirred *p = PRIV(a, mirred); 153 struct tcf_mirred *m = a->priv;
163 struct net_device *dev; 154 struct net_device *dev;
164 struct sk_buff *skb2 = NULL; 155 struct sk_buff *skb2 = NULL;
165 u32 at = G_TC_AT(skb->tc_verd); 156 u32 at = G_TC_AT(skb->tc_verd);
166 157
167 spin_lock(&p->lock); 158 spin_lock(&m->tcf_lock);
168 159
169 dev = p->dev; 160 dev = m->tcfm_dev;
170 p->tm.lastuse = jiffies; 161 m->tcf_tm.lastuse = jiffies;
171 162
172 if (!(dev->flags&IFF_UP) ) { 163 if (!(dev->flags&IFF_UP) ) {
173 if (net_ratelimit()) 164 if (net_ratelimit())
@@ -176,10 +167,10 @@ tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
176bad_mirred: 167bad_mirred:
177 if (skb2 != NULL) 168 if (skb2 != NULL)
178 kfree_skb(skb2); 169 kfree_skb(skb2);
179 p->qstats.overlimits++; 170 m->tcf_qstats.overlimits++;
180 p->bstats.bytes += skb->len; 171 m->tcf_bstats.bytes += skb->len;
181 p->bstats.packets++; 172 m->tcf_bstats.packets++;
182 spin_unlock(&p->lock); 173 spin_unlock(&m->tcf_lock);
183 /* should we be asking for packet to be dropped? 174 /* should we be asking for packet to be dropped?
184 * may make sense for redirect case only 175 * may make sense for redirect case only
185 */ 176 */
@@ -189,59 +180,59 @@ bad_mirred:
189 skb2 = skb_clone(skb, GFP_ATOMIC); 180 skb2 = skb_clone(skb, GFP_ATOMIC);
190 if (skb2 == NULL) 181 if (skb2 == NULL)
191 goto bad_mirred; 182 goto bad_mirred;
192 if (p->eaction != TCA_EGRESS_MIRROR && p->eaction != TCA_EGRESS_REDIR) { 183 if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
184 m->tcfm_eaction != TCA_EGRESS_REDIR) {
193 if (net_ratelimit()) 185 if (net_ratelimit())
194 printk("tcf_mirred unknown action %d\n", p->eaction); 186 printk("tcf_mirred unknown action %d\n",
187 m->tcfm_eaction);
195 goto bad_mirred; 188 goto bad_mirred;
196 } 189 }
197 190
198 p->bstats.bytes += skb2->len; 191 m->tcf_bstats.bytes += skb2->len;
199 p->bstats.packets++; 192 m->tcf_bstats.packets++;
200 if (!(at & AT_EGRESS)) 193 if (!(at & AT_EGRESS))
201 if (p->ok_push) 194 if (m->tcfm_ok_push)
202 skb_push(skb2, skb2->dev->hard_header_len); 195 skb_push(skb2, skb2->dev->hard_header_len);
203 196
204 /* mirror is always swallowed */ 197 /* mirror is always swallowed */
205 if (p->eaction != TCA_EGRESS_MIRROR) 198 if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
206 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); 199 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
207 200
208 skb2->dev = dev; 201 skb2->dev = dev;
209 skb2->input_dev = skb->dev; 202 skb2->input_dev = skb->dev;
210 dev_queue_xmit(skb2); 203 dev_queue_xmit(skb2);
211 spin_unlock(&p->lock); 204 spin_unlock(&m->tcf_lock);
212 return p->action; 205 return m->tcf_action;
213} 206}
214 207
215static int 208static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
216tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
217{ 209{
218 unsigned char *b = skb->tail; 210 unsigned char *b = skb->tail;
211 struct tcf_mirred *m = a->priv;
219 struct tc_mirred opt; 212 struct tc_mirred opt;
220 struct tcf_mirred *p = PRIV(a, mirred);
221 struct tcf_t t; 213 struct tcf_t t;
222 214
223 opt.index = p->index; 215 opt.index = m->tcf_index;
224 opt.action = p->action; 216 opt.action = m->tcf_action;
225 opt.refcnt = p->refcnt - ref; 217 opt.refcnt = m->tcf_refcnt - ref;
226 opt.bindcnt = p->bindcnt - bind; 218 opt.bindcnt = m->tcf_bindcnt - bind;
227 opt.eaction = p->eaction; 219 opt.eaction = m->tcfm_eaction;
228 opt.ifindex = p->ifindex; 220 opt.ifindex = m->tcfm_ifindex;
229 DPRINTK("tcf_mirred_dump index %d action %d eaction %d ifindex %d\n",
230 p->index, p->action, p->eaction, p->ifindex);
231 RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); 221 RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
232 t.install = jiffies_to_clock_t(jiffies - p->tm.install); 222 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
233 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 223 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
234 t.expires = jiffies_to_clock_t(p->tm.expires); 224 t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
235 RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); 225 RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
236 return skb->len; 226 return skb->len;
237 227
238 rtattr_failure: 228rtattr_failure:
239 skb_trim(skb, b - skb->data); 229 skb_trim(skb, b - skb->data);
240 return -1; 230 return -1;
241} 231}
242 232
243static struct tc_action_ops act_mirred_ops = { 233static struct tc_action_ops act_mirred_ops = {
244 .kind = "mirred", 234 .kind = "mirred",
235 .hinfo = &mirred_hash_info,
245 .type = TCA_ACT_MIRRED, 236 .type = TCA_ACT_MIRRED,
246 .capab = TCA_CAP_NONE, 237 .capab = TCA_CAP_NONE,
247 .owner = THIS_MODULE, 238 .owner = THIS_MODULE,
@@ -257,15 +248,13 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002)");
257MODULE_DESCRIPTION("Device Mirror/redirect actions"); 248MODULE_DESCRIPTION("Device Mirror/redirect actions");
258MODULE_LICENSE("GPL"); 249MODULE_LICENSE("GPL");
259 250
260static int __init 251static int __init mirred_init_module(void)
261mirred_init_module(void)
262{ 252{
263 printk("Mirror/redirect action on\n"); 253 printk("Mirror/redirect action on\n");
264 return tcf_register_action(&act_mirred_ops); 254 return tcf_register_action(&act_mirred_ops);
265} 255}
266 256
267static void __exit 257static void __exit mirred_cleanup_module(void)
268mirred_cleanup_module(void)
269{ 258{
270 tcf_unregister_action(&act_mirred_ops); 259 tcf_unregister_action(&act_mirred_ops);
271} 260}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index f257475e0e0c..8ac65c219b98 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -33,32 +33,25 @@
33#include <linux/tc_act/tc_pedit.h> 33#include <linux/tc_act/tc_pedit.h>
34#include <net/tc_act/tc_pedit.h> 34#include <net/tc_act/tc_pedit.h>
35 35
36 36#define PEDIT_TAB_MASK 15
37#define PEDIT_DEB 1 37static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1];
38 38static u32 pedit_idx_gen;
39/* use generic hash table */
40#define MY_TAB_SIZE 16
41#define MY_TAB_MASK 15
42static u32 idx_gen;
43static struct tcf_pedit *tcf_pedit_ht[MY_TAB_SIZE];
44static DEFINE_RWLOCK(pedit_lock); 39static DEFINE_RWLOCK(pedit_lock);
45 40
46#define tcf_st tcf_pedit 41static struct tcf_hashinfo pedit_hash_info = {
47#define tc_st tc_pedit 42 .htab = tcf_pedit_ht,
48#define tcf_t_lock pedit_lock 43 .hmask = PEDIT_TAB_MASK,
49#define tcf_ht tcf_pedit_ht 44 .lock = &pedit_lock,
50 45};
51#define CONFIG_NET_ACT_INIT 1
52#include <net/pkt_act.h>
53 46
54static int 47static int tcf_pedit_init(struct rtattr *rta, struct rtattr *est,
55tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 48 struct tc_action *a, int ovr, int bind)
56 int ovr, int bind)
57{ 49{
58 struct rtattr *tb[TCA_PEDIT_MAX]; 50 struct rtattr *tb[TCA_PEDIT_MAX];
59 struct tc_pedit *parm; 51 struct tc_pedit *parm;
60 int ret = 0; 52 int ret = 0;
61 struct tcf_pedit *p; 53 struct tcf_pedit *p;
54 struct tcf_common *pc;
62 struct tc_pedit_key *keys = NULL; 55 struct tc_pedit_key *keys = NULL;
63 int ksize; 56 int ksize;
64 57
@@ -73,54 +66,56 @@ tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
73 if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize) 66 if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize)
74 return -EINVAL; 67 return -EINVAL;
75 68
76 p = tcf_hash_check(parm->index, a, ovr, bind); 69 pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info);
77 if (p == NULL) { 70 if (!pc) {
78 if (!parm->nkeys) 71 if (!parm->nkeys)
79 return -EINVAL; 72 return -EINVAL;
80 p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 73 pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
81 if (p == NULL) 74 &pedit_idx_gen, &pedit_hash_info);
75 if (unlikely(!pc))
82 return -ENOMEM; 76 return -ENOMEM;
77 p = to_pedit(pc);
83 keys = kmalloc(ksize, GFP_KERNEL); 78 keys = kmalloc(ksize, GFP_KERNEL);
84 if (keys == NULL) { 79 if (keys == NULL) {
85 kfree(p); 80 kfree(pc);
86 return -ENOMEM; 81 return -ENOMEM;
87 } 82 }
88 ret = ACT_P_CREATED; 83 ret = ACT_P_CREATED;
89 } else { 84 } else {
85 p = to_pedit(pc);
90 if (!ovr) { 86 if (!ovr) {
91 tcf_hash_release(p, bind); 87 tcf_hash_release(pc, bind, &pedit_hash_info);
92 return -EEXIST; 88 return -EEXIST;
93 } 89 }
94 if (p->nkeys && p->nkeys != parm->nkeys) { 90 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
95 keys = kmalloc(ksize, GFP_KERNEL); 91 keys = kmalloc(ksize, GFP_KERNEL);
96 if (keys == NULL) 92 if (keys == NULL)
97 return -ENOMEM; 93 return -ENOMEM;
98 } 94 }
99 } 95 }
100 96
101 spin_lock_bh(&p->lock); 97 spin_lock_bh(&p->tcf_lock);
102 p->flags = parm->flags; 98 p->tcfp_flags = parm->flags;
103 p->action = parm->action; 99 p->tcf_action = parm->action;
104 if (keys) { 100 if (keys) {
105 kfree(p->keys); 101 kfree(p->tcfp_keys);
106 p->keys = keys; 102 p->tcfp_keys = keys;
107 p->nkeys = parm->nkeys; 103 p->tcfp_nkeys = parm->nkeys;
108 } 104 }
109 memcpy(p->keys, parm->keys, ksize); 105 memcpy(p->tcfp_keys, parm->keys, ksize);
110 spin_unlock_bh(&p->lock); 106 spin_unlock_bh(&p->tcf_lock);
111 if (ret == ACT_P_CREATED) 107 if (ret == ACT_P_CREATED)
112 tcf_hash_insert(p); 108 tcf_hash_insert(pc, &pedit_hash_info);
113 return ret; 109 return ret;
114} 110}
115 111
116static int 112static int tcf_pedit_cleanup(struct tc_action *a, int bind)
117tcf_pedit_cleanup(struct tc_action *a, int bind)
118{ 113{
119 struct tcf_pedit *p = PRIV(a, pedit); 114 struct tcf_pedit *p = a->priv;
120 115
121 if (p != NULL) { 116 if (p) {
122 struct tc_pedit_key *keys = p->keys; 117 struct tc_pedit_key *keys = p->tcfp_keys;
123 if (tcf_hash_release(p, bind)) { 118 if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) {
124 kfree(keys); 119 kfree(keys);
125 return 1; 120 return 1;
126 } 121 }
@@ -128,30 +123,30 @@ tcf_pedit_cleanup(struct tc_action *a, int bind)
128 return 0; 123 return 0;
129} 124}
130 125
131static int 126static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
132tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 127 struct tcf_result *res)
133{ 128{
134 struct tcf_pedit *p = PRIV(a, pedit); 129 struct tcf_pedit *p = a->priv;
135 int i, munged = 0; 130 int i, munged = 0;
136 u8 *pptr; 131 u8 *pptr;
137 132
138 if (!(skb->tc_verd & TC_OK2MUNGE)) { 133 if (!(skb->tc_verd & TC_OK2MUNGE)) {
139 /* should we set skb->cloned? */ 134 /* should we set skb->cloned? */
140 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 135 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
141 return p->action; 136 return p->tcf_action;
142 } 137 }
143 } 138 }
144 139
145 pptr = skb->nh.raw; 140 pptr = skb->nh.raw;
146 141
147 spin_lock(&p->lock); 142 spin_lock(&p->tcf_lock);
148 143
149 p->tm.lastuse = jiffies; 144 p->tcf_tm.lastuse = jiffies;
150 145
151 if (p->nkeys > 0) { 146 if (p->tcfp_nkeys > 0) {
152 struct tc_pedit_key *tkey = p->keys; 147 struct tc_pedit_key *tkey = p->tcfp_keys;
153 148
154 for (i = p->nkeys; i > 0; i--, tkey++) { 149 for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
155 u32 *ptr; 150 u32 *ptr;
156 int offset = tkey->off; 151 int offset = tkey->off;
157 152
@@ -169,7 +164,8 @@ tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
169 printk("offset must be on 32 bit boundaries\n"); 164 printk("offset must be on 32 bit boundaries\n");
170 goto bad; 165 goto bad;
171 } 166 }
172 if (skb->len < 0 || (offset > 0 && offset > skb->len)) { 167 if (skb->len < 0 ||
168 (offset > 0 && offset > skb->len)) {
173 printk("offset %d cant exceed pkt length %d\n", 169 printk("offset %d cant exceed pkt length %d\n",
174 offset, skb->len); 170 offset, skb->len);
175 goto bad; 171 goto bad;
@@ -185,63 +181,47 @@ tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
185 skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); 181 skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
186 goto done; 182 goto done;
187 } else { 183 } else {
188 printk("pedit BUG: index %d\n",p->index); 184 printk("pedit BUG: index %d\n", p->tcf_index);
189 } 185 }
190 186
191bad: 187bad:
192 p->qstats.overlimits++; 188 p->tcf_qstats.overlimits++;
193done: 189done:
194 p->bstats.bytes += skb->len; 190 p->tcf_bstats.bytes += skb->len;
195 p->bstats.packets++; 191 p->tcf_bstats.packets++;
196 spin_unlock(&p->lock); 192 spin_unlock(&p->tcf_lock);
197 return p->action; 193 return p->tcf_action;
198} 194}
199 195
200static int 196static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
201tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref) 197 int bind, int ref)
202{ 198{
203 unsigned char *b = skb->tail; 199 unsigned char *b = skb->tail;
200 struct tcf_pedit *p = a->priv;
204 struct tc_pedit *opt; 201 struct tc_pedit *opt;
205 struct tcf_pedit *p = PRIV(a, pedit);
206 struct tcf_t t; 202 struct tcf_t t;
207 int s; 203 int s;
208 204
209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 205 s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key);
210 206
211 /* netlink spinlocks held above us - must use ATOMIC */ 207 /* netlink spinlocks held above us - must use ATOMIC */
212 opt = kzalloc(s, GFP_ATOMIC); 208 opt = kzalloc(s, GFP_ATOMIC);
213 if (opt == NULL) 209 if (unlikely(!opt))
214 return -ENOBUFS; 210 return -ENOBUFS;
215 211
216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 212 memcpy(opt->keys, p->tcfp_keys,
217 opt->index = p->index; 213 p->tcfp_nkeys * sizeof(struct tc_pedit_key));
218 opt->nkeys = p->nkeys; 214 opt->index = p->tcf_index;
219 opt->flags = p->flags; 215 opt->nkeys = p->tcfp_nkeys;
220 opt->action = p->action; 216 opt->flags = p->tcfp_flags;
221 opt->refcnt = p->refcnt - ref; 217 opt->action = p->tcf_action;
222 opt->bindcnt = p->bindcnt - bind; 218 opt->refcnt = p->tcf_refcnt - ref;
223 219 opt->bindcnt = p->tcf_bindcnt - bind;
224
225#ifdef PEDIT_DEB
226 {
227 /* Debug - get rid of later */
228 int i;
229 struct tc_pedit_key *key = opt->keys;
230
231 for (i=0; i<opt->nkeys; i++, key++) {
232 printk( "\n key #%d",i);
233 printk( " at %d: val %08x mask %08x",
234 (unsigned int)key->off,
235 (unsigned int)key->val,
236 (unsigned int)key->mask);
237 }
238 }
239#endif
240 220
241 RTA_PUT(skb, TCA_PEDIT_PARMS, s, opt); 221 RTA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
242 t.install = jiffies_to_clock_t(jiffies - p->tm.install); 222 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
243 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 223 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
244 t.expires = jiffies_to_clock_t(p->tm.expires); 224 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
245 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 225 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
246 kfree(opt); 226 kfree(opt);
247 return skb->len; 227 return skb->len;
@@ -252,9 +232,9 @@ rtattr_failure:
252 return -1; 232 return -1;
253} 233}
254 234
255static 235static struct tc_action_ops act_pedit_ops = {
256struct tc_action_ops act_pedit_ops = {
257 .kind = "pedit", 236 .kind = "pedit",
237 .hinfo = &pedit_hash_info,
258 .type = TCA_ACT_PEDIT, 238 .type = TCA_ACT_PEDIT,
259 .capab = TCA_CAP_NONE, 239 .capab = TCA_CAP_NONE,
260 .owner = THIS_MODULE, 240 .owner = THIS_MODULE,
@@ -270,14 +250,12 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
270MODULE_DESCRIPTION("Generic Packet Editor actions"); 250MODULE_DESCRIPTION("Generic Packet Editor actions");
271MODULE_LICENSE("GPL"); 251MODULE_LICENSE("GPL");
272 252
273static int __init 253static int __init pedit_init_module(void)
274pedit_init_module(void)
275{ 254{
276 return tcf_register_action(&act_pedit_ops); 255 return tcf_register_action(&act_pedit_ops);
277} 256}
278 257
279static void __exit 258static void __exit pedit_cleanup_module(void)
280pedit_cleanup_module(void)
281{ 259{
282 tcf_unregister_action(&act_pedit_ops); 260 tcf_unregister_action(&act_pedit_ops);
283} 261}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index da905d7b4b40..fed47b658837 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -32,43 +32,27 @@
32#include <net/sock.h> 32#include <net/sock.h>
33#include <net/act_api.h> 33#include <net/act_api.h>
34 34
35#define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log]) 35#define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
36#define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log]) 36#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
37#define PRIV(a) ((struct tcf_police *) (a)->priv)
38
39/* use generic hash table */
40#define MY_TAB_SIZE 16
41#define MY_TAB_MASK 15
42static u32 idx_gen;
43static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
44/* Policer hash table lock */
45static DEFINE_RWLOCK(police_lock);
46
47/* Each policer is serialized by its individual spinlock */
48 37
49static __inline__ unsigned tcf_police_hash(u32 index) 38#define POL_TAB_MASK 15
50{ 39static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
51 return index&0xF; 40static u32 police_idx_gen;
52} 41static DEFINE_RWLOCK(police_lock);
53 42
54static __inline__ struct tcf_police * tcf_police_lookup(u32 index) 43static struct tcf_hashinfo police_hash_info = {
55{ 44 .htab = tcf_police_ht,
56 struct tcf_police *p; 45 .hmask = POL_TAB_MASK,
46 .lock = &police_lock,
47};
57 48
58 read_lock(&police_lock); 49/* Each policer is serialized by its individual spinlock */
59 for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
60 if (p->index == index)
61 break;
62 }
63 read_unlock(&police_lock);
64 return p;
65}
66 50
67#ifdef CONFIG_NET_CLS_ACT 51#ifdef CONFIG_NET_CLS_ACT
68static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb, 52static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
69 int type, struct tc_action *a) 53 int type, struct tc_action *a)
70{ 54{
71 struct tcf_police *p; 55 struct tcf_common *p;
72 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; 56 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
73 struct rtattr *r; 57 struct rtattr *r;
74 58
@@ -76,10 +60,10 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
76 60
77 s_i = cb->args[0]; 61 s_i = cb->args[0];
78 62
79 for (i = 0; i < MY_TAB_SIZE; i++) { 63 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
80 p = tcf_police_ht[tcf_police_hash(i)]; 64 p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
81 65
82 for (; p; p = p->next) { 66 for (; p; p = p->tcfc_next) {
83 index++; 67 index++;
84 if (index < s_i) 68 if (index < s_i)
85 continue; 69 continue;
@@ -110,48 +94,26 @@ rtattr_failure:
110 skb_trim(skb, (u8*)r - skb->data); 94 skb_trim(skb, (u8*)r - skb->data);
111 goto done; 95 goto done;
112} 96}
113
114static inline int
115tcf_act_police_hash_search(struct tc_action *a, u32 index)
116{
117 struct tcf_police *p = tcf_police_lookup(index);
118
119 if (p != NULL) {
120 a->priv = p;
121 return 1;
122 } else {
123 return 0;
124 }
125}
126#endif 97#endif
127 98
128static inline u32 tcf_police_new_index(void)
129{
130 do {
131 if (++idx_gen == 0)
132 idx_gen = 1;
133 } while (tcf_police_lookup(idx_gen));
134
135 return idx_gen;
136}
137
138void tcf_police_destroy(struct tcf_police *p) 99void tcf_police_destroy(struct tcf_police *p)
139{ 100{
140 unsigned h = tcf_police_hash(p->index); 101 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
141 struct tcf_police **p1p; 102 struct tcf_common **p1p;
142 103
143 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) { 104 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
144 if (*p1p == p) { 105 if (*p1p == &p->common) {
145 write_lock_bh(&police_lock); 106 write_lock_bh(&police_lock);
146 *p1p = p->next; 107 *p1p = p->tcf_next;
147 write_unlock_bh(&police_lock); 108 write_unlock_bh(&police_lock);
148#ifdef CONFIG_NET_ESTIMATOR 109#ifdef CONFIG_NET_ESTIMATOR
149 gen_kill_estimator(&p->bstats, &p->rate_est); 110 gen_kill_estimator(&p->tcf_bstats,
111 &p->tcf_rate_est);
150#endif 112#endif
151 if (p->R_tab) 113 if (p->tcfp_R_tab)
152 qdisc_put_rtab(p->R_tab); 114 qdisc_put_rtab(p->tcfp_R_tab);
153 if (p->P_tab) 115 if (p->tcfp_P_tab)
154 qdisc_put_rtab(p->P_tab); 116 qdisc_put_rtab(p->tcfp_P_tab);
155 kfree(p); 117 kfree(p);
156 return; 118 return;
157 } 119 }
@@ -167,7 +129,7 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
167 int ret = 0, err; 129 int ret = 0, err;
168 struct rtattr *tb[TCA_POLICE_MAX]; 130 struct rtattr *tb[TCA_POLICE_MAX];
169 struct tc_police *parm; 131 struct tc_police *parm;
170 struct tcf_police *p; 132 struct tcf_police *police;
171 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 133 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
172 134
173 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) 135 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
@@ -185,27 +147,32 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
185 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32)) 147 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
186 return -EINVAL; 148 return -EINVAL;
187 149
188 if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) { 150 if (parm->index) {
189 a->priv = p; 151 struct tcf_common *pc;
190 if (bind) { 152
191 p->bindcnt += 1; 153 pc = tcf_hash_lookup(parm->index, &police_hash_info);
192 p->refcnt += 1; 154 if (pc != NULL) {
155 a->priv = pc;
156 police = to_police(pc);
157 if (bind) {
158 police->tcf_bindcnt += 1;
159 police->tcf_refcnt += 1;
160 }
161 if (ovr)
162 goto override;
163 return ret;
193 } 164 }
194 if (ovr)
195 goto override;
196 return ret;
197 } 165 }
198 166
199 p = kzalloc(sizeof(*p), GFP_KERNEL); 167 police = kzalloc(sizeof(*police), GFP_KERNEL);
200 if (p == NULL) 168 if (police == NULL)
201 return -ENOMEM; 169 return -ENOMEM;
202
203 ret = ACT_P_CREATED; 170 ret = ACT_P_CREATED;
204 p->refcnt = 1; 171 police->tcf_refcnt = 1;
205 spin_lock_init(&p->lock); 172 spin_lock_init(&police->tcf_lock);
206 p->stats_lock = &p->lock; 173 police->tcf_stats_lock = &police->tcf_lock;
207 if (bind) 174 if (bind)
208 p->bindcnt = 1; 175 police->tcf_bindcnt = 1;
209override: 176override:
210 if (parm->rate.rate) { 177 if (parm->rate.rate) {
211 err = -ENOMEM; 178 err = -ENOMEM;
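The lookup path in the hunk above gets a generic struct tcf_common * back from tcf_hash_lookup() and converts it with to_police(), while the table stores &police->common. The to_police() definition lives in a header outside this diff, so the sketch below is only a plausible userspace rendering of that embed-and-convert idiom: the tcfc_*/tcfp_* names are copied from the hunks, the macro body and the burst example are assumptions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tcf_common {
        unsigned int tcfc_index;
        unsigned int tcfc_refcnt;
};

/* Module-specific action: the common block is embedded as a member. */
struct tcf_police {
        struct tcf_common common;
        unsigned int      tcfp_burst;
};

/* Assumed shape of the to_police() helper used in the hunk above. */
#define to_police(pc) container_of(pc, struct tcf_police, common)

int main(void)
{
        struct tcf_police police = {
                .common     = { .tcfc_index = 7 },
                .tcfp_burst = 1000,
        };
        struct tcf_common *pc = &police.common;   /* what the hash table stores */

        printf("burst via to_police(): %u\n", to_police(pc)->tcfp_burst);
        return 0;
}

In the patch the police->tcf_index, tcf_refcnt and similar accessors are presumably aliases onto the embedded common block in the same header; only the conversion direction shown here is visible in this diff.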
@@ -215,67 +182,71 @@ override:
215 if (parm->peakrate.rate) { 182 if (parm->peakrate.rate) {
216 P_tab = qdisc_get_rtab(&parm->peakrate, 183 P_tab = qdisc_get_rtab(&parm->peakrate,
217 tb[TCA_POLICE_PEAKRATE-1]); 184 tb[TCA_POLICE_PEAKRATE-1]);
218 if (p->P_tab == NULL) { 185 if (P_tab == NULL) {
219 qdisc_put_rtab(R_tab); 186 qdisc_put_rtab(R_tab);
220 goto failure; 187 goto failure;
221 } 188 }
222 } 189 }
223 } 190 }
224 /* No failure allowed after this point */ 191 /* No failure allowed after this point */
225 spin_lock_bh(&p->lock); 192 spin_lock_bh(&police->tcf_lock);
226 if (R_tab != NULL) { 193 if (R_tab != NULL) {
227 qdisc_put_rtab(p->R_tab); 194 qdisc_put_rtab(police->tcfp_R_tab);
228 p->R_tab = R_tab; 195 police->tcfp_R_tab = R_tab;
229 } 196 }
230 if (P_tab != NULL) { 197 if (P_tab != NULL) {
231 qdisc_put_rtab(p->P_tab); 198 qdisc_put_rtab(police->tcfp_P_tab);
232 p->P_tab = P_tab; 199 police->tcfp_P_tab = P_tab;
233 } 200 }
234 201
235 if (tb[TCA_POLICE_RESULT-1]) 202 if (tb[TCA_POLICE_RESULT-1])
236 p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 203 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
237 p->toks = p->burst = parm->burst; 204 police->tcfp_toks = police->tcfp_burst = parm->burst;
238 p->mtu = parm->mtu; 205 police->tcfp_mtu = parm->mtu;
239 if (p->mtu == 0) { 206 if (police->tcfp_mtu == 0) {
240 p->mtu = ~0; 207 police->tcfp_mtu = ~0;
241 if (p->R_tab) 208 if (police->tcfp_R_tab)
242 p->mtu = 255<<p->R_tab->rate.cell_log; 209 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
243 } 210 }
244 if (p->P_tab) 211 if (police->tcfp_P_tab)
245 p->ptoks = L2T_P(p, p->mtu); 212 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
246 p->action = parm->action; 213 police->tcf_action = parm->action;
247 214
248#ifdef CONFIG_NET_ESTIMATOR 215#ifdef CONFIG_NET_ESTIMATOR
249 if (tb[TCA_POLICE_AVRATE-1]) 216 if (tb[TCA_POLICE_AVRATE-1])
250 p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 217 police->tcfp_ewma_rate =
218 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
251 if (est) 219 if (est)
252 gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est); 220 gen_replace_estimator(&police->tcf_bstats,
221 &police->tcf_rate_est,
222 police->tcf_stats_lock, est);
253#endif 223#endif
254 224
255 spin_unlock_bh(&p->lock); 225 spin_unlock_bh(&police->tcf_lock);
256 if (ret != ACT_P_CREATED) 226 if (ret != ACT_P_CREATED)
257 return ret; 227 return ret;
258 228
259 PSCHED_GET_TIME(p->t_c); 229 PSCHED_GET_TIME(police->tcfp_t_c);
260 p->index = parm->index ? : tcf_police_new_index(); 230 police->tcf_index = parm->index ? parm->index :
261 h = tcf_police_hash(p->index); 231 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
232 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
262 write_lock_bh(&police_lock); 233 write_lock_bh(&police_lock);
263 p->next = tcf_police_ht[h]; 234 police->tcf_next = tcf_police_ht[h];
264 tcf_police_ht[h] = p; 235 tcf_police_ht[h] = &police->common;
265 write_unlock_bh(&police_lock); 236 write_unlock_bh(&police_lock);
266 237
267 a->priv = p; 238 a->priv = police;
268 return ret; 239 return ret;
269 240
270failure: 241failure:
271 if (ret == ACT_P_CREATED) 242 if (ret == ACT_P_CREATED)
272 kfree(p); 243 kfree(police);
273 return err; 244 return err;
274} 245}
275 246
276static int tcf_act_police_cleanup(struct tc_action *a, int bind) 247static int tcf_act_police_cleanup(struct tc_action *a, int bind)
277{ 248{
278 struct tcf_police *p = PRIV(a); 249 struct tcf_police *p = a->priv;
279 250
280 if (p != NULL) 251 if (p != NULL)
281 return tcf_police_release(p, bind); 252 return tcf_police_release(p, bind);
@@ -285,86 +256,87 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
285static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 256static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
286 struct tcf_result *res) 257 struct tcf_result *res)
287{ 258{
259 struct tcf_police *police = a->priv;
288 psched_time_t now; 260 psched_time_t now;
289 struct tcf_police *p = PRIV(a);
290 long toks; 261 long toks;
291 long ptoks = 0; 262 long ptoks = 0;
292 263
293 spin_lock(&p->lock); 264 spin_lock(&police->tcf_lock);
294 265
295 p->bstats.bytes += skb->len; 266 police->tcf_bstats.bytes += skb->len;
296 p->bstats.packets++; 267 police->tcf_bstats.packets++;
297 268
298#ifdef CONFIG_NET_ESTIMATOR 269#ifdef CONFIG_NET_ESTIMATOR
299 if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) { 270 if (police->tcfp_ewma_rate &&
300 p->qstats.overlimits++; 271 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
301 spin_unlock(&p->lock); 272 police->tcf_qstats.overlimits++;
302 return p->action; 273 spin_unlock(&police->tcf_lock);
274 return police->tcf_action;
303 } 275 }
304#endif 276#endif
305 277
306 if (skb->len <= p->mtu) { 278 if (skb->len <= police->tcfp_mtu) {
307 if (p->R_tab == NULL) { 279 if (police->tcfp_R_tab == NULL) {
308 spin_unlock(&p->lock); 280 spin_unlock(&police->tcf_lock);
309 return p->result; 281 return police->tcfp_result;
310 } 282 }
311 283
312 PSCHED_GET_TIME(now); 284 PSCHED_GET_TIME(now);
313 285
314 toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst); 286 toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
315 287 police->tcfp_burst);
316 if (p->P_tab) { 288 if (police->tcfp_P_tab) {
317 ptoks = toks + p->ptoks; 289 ptoks = toks + police->tcfp_ptoks;
318 if (ptoks > (long)L2T_P(p, p->mtu)) 290 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
319 ptoks = (long)L2T_P(p, p->mtu); 291 ptoks = (long)L2T_P(police, police->tcfp_mtu);
320 ptoks -= L2T_P(p, skb->len); 292 ptoks -= L2T_P(police, skb->len);
321 } 293 }
322 toks += p->toks; 294 toks += police->tcfp_toks;
323 if (toks > (long)p->burst) 295 if (toks > (long)police->tcfp_burst)
324 toks = p->burst; 296 toks = police->tcfp_burst;
325 toks -= L2T(p, skb->len); 297 toks -= L2T(police, skb->len);
326
327 if ((toks|ptoks) >= 0) { 298 if ((toks|ptoks) >= 0) {
328 p->t_c = now; 299 police->tcfp_t_c = now;
329 p->toks = toks; 300 police->tcfp_toks = toks;
330 p->ptoks = ptoks; 301 police->tcfp_ptoks = ptoks;
331 spin_unlock(&p->lock); 302 spin_unlock(&police->tcf_lock);
332 return p->result; 303 return police->tcfp_result;
333 } 304 }
334 } 305 }
335 306
336 p->qstats.overlimits++; 307 police->tcf_qstats.overlimits++;
337 spin_unlock(&p->lock); 308 spin_unlock(&police->tcf_lock);
338 return p->action; 309 return police->tcf_action;
339} 310}
340 311
341static int 312static int
342tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 313tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
343{ 314{
344 unsigned char *b = skb->tail; 315 unsigned char *b = skb->tail;
316 struct tcf_police *police = a->priv;
345 struct tc_police opt; 317 struct tc_police opt;
346 struct tcf_police *p = PRIV(a); 318
347 319 opt.index = police->tcf_index;
348 opt.index = p->index; 320 opt.action = police->tcf_action;
349 opt.action = p->action; 321 opt.mtu = police->tcfp_mtu;
350 opt.mtu = p->mtu; 322 opt.burst = police->tcfp_burst;
351 opt.burst = p->burst; 323 opt.refcnt = police->tcf_refcnt - ref;
352 opt.refcnt = p->refcnt - ref; 324 opt.bindcnt = police->tcf_bindcnt - bind;
353 opt.bindcnt = p->bindcnt - bind; 325 if (police->tcfp_R_tab)
354 if (p->R_tab) 326 opt.rate = police->tcfp_R_tab->rate;
355 opt.rate = p->R_tab->rate;
356 else 327 else
357 memset(&opt.rate, 0, sizeof(opt.rate)); 328 memset(&opt.rate, 0, sizeof(opt.rate));
358 if (p->P_tab) 329 if (police->tcfp_P_tab)
359 opt.peakrate = p->P_tab->rate; 330 opt.peakrate = police->tcfp_P_tab->rate;
360 else 331 else
361 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 332 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
362 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 333 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
363 if (p->result) 334 if (police->tcfp_result)
364 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result); 335 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
336 &police->tcfp_result);
365#ifdef CONFIG_NET_ESTIMATOR 337#ifdef CONFIG_NET_ESTIMATOR
366 if (p->ewma_rate) 338 if (police->tcfp_ewma_rate)
367 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate); 339 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
368#endif 340#endif
369 return skb->len; 341 return skb->len;
370 342
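The act routine in the hunk above is a two-rate token bucket: elapsed time (bounded by the burst via PSCHED_TDIFF_SAFE) becomes credit, ptoks is charged against the peak-rate table and toks against the committed-rate table, and the packet conforms only if neither bucket goes negative, which the (toks|ptoks) >= 0 test checks through the sign bits. The standalone sketch below reproduces just that arithmetic; the l2t()/l2t_p() cost functions are made-up stand-ins for the L2T()/L2T_P() rate-table lookups, and the time source is an abstract tick counter.

#include <stdio.h>

/* Illustrative two-rate token bucket; rate-table lookups are replaced by
 * simple cost functions, which is an assumption for the sketch. */
struct policer {
        long burst, mtu;        /* bucket depth / largest conforming packet */
        long toks, ptoks;       /* committed-rate and peak-rate credit      */
        long t_c;               /* time of last update (abstract ticks)     */
};

static long l2t(long len)   { return len * 2; }   /* stand-in for L2T(p, len)   */
static long l2t_p(long len) { return len; }       /* stand-in for L2T_P(p, len) */

/* Returns 1 if the packet conforms, 0 if it is over limit. */
static int police_check(struct policer *p, long now, long len)
{
        long toks, ptoks;

        if (len > p->mtu)
                return 0;

        toks = now - p->t_c;                    /* elapsed time becomes credit */
        if (toks > p->burst)
                toks = p->burst;

        ptoks = toks + p->ptoks;
        if (ptoks > l2t_p(p->mtu))
                ptoks = l2t_p(p->mtu);
        ptoks -= l2t_p(len);

        toks += p->toks;
        if (toks > p->burst)
                toks = p->burst;
        toks -= l2t(len);

        if ((toks | ptoks) >= 0) {              /* both buckets stayed non-negative */
                p->t_c   = now;
                p->toks  = toks;
                p->ptoks = ptoks;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct policer p = { .burst = 1000, .mtu = 1500, .toks = 1000, .ptoks = 1500 };

        printf("first 500-byte packet at t=0:  %s\n", police_check(&p, 0, 500) ? "ok" : "over limit");
        printf("second 500-byte packet at t=0: %s\n", police_check(&p, 0, 500) ? "ok" : "over limit");
        return 0;
}

Run on its own, the example admits the first 500-byte packet and rejects the second, since the committed bucket is exhausted before any new credit has accrued.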
@@ -379,13 +351,14 @@ MODULE_LICENSE("GPL");
379 351
380static struct tc_action_ops act_police_ops = { 352static struct tc_action_ops act_police_ops = {
381 .kind = "police", 353 .kind = "police",
354 .hinfo = &police_hash_info,
382 .type = TCA_ID_POLICE, 355 .type = TCA_ID_POLICE,
383 .capab = TCA_CAP_NONE, 356 .capab = TCA_CAP_NONE,
384 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
385 .act = tcf_act_police, 358 .act = tcf_act_police,
386 .dump = tcf_act_police_dump, 359 .dump = tcf_act_police_dump,
387 .cleanup = tcf_act_police_cleanup, 360 .cleanup = tcf_act_police_cleanup,
388 .lookup = tcf_act_police_hash_search, 361 .lookup = tcf_hash_search,
389 .init = tcf_act_police_locate, 362 .init = tcf_act_police_locate,
390 .walk = tcf_act_police_walker 363 .walk = tcf_act_police_walker
391}; 364};
@@ -407,10 +380,39 @@ module_exit(police_cleanup_module);
407 380
408#else /* CONFIG_NET_CLS_ACT */ 381#else /* CONFIG_NET_CLS_ACT */
409 382
410struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) 383static struct tcf_common *tcf_police_lookup(u32 index)
411{ 384{
412 unsigned h; 385 struct tcf_hashinfo *hinfo = &police_hash_info;
413 struct tcf_police *p; 386 struct tcf_common *p;
387
388 read_lock(hinfo->lock);
389 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
390 p = p->tcfc_next) {
391 if (p->tcfc_index == index)
392 break;
393 }
394 read_unlock(hinfo->lock);
395
396 return p;
397}
398
399static u32 tcf_police_new_index(void)
400{
401 u32 *idx_gen = &police_idx_gen;
402 u32 val = *idx_gen;
403
404 do {
405 if (++val == 0)
406 val = 1;
407 } while (tcf_police_lookup(val));
408
409 return (*idx_gen = val);
410}
411
412struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
413{
414 unsigned int h;
415 struct tcf_police *police;
414 struct rtattr *tb[TCA_POLICE_MAX]; 416 struct rtattr *tb[TCA_POLICE_MAX];
415 struct tc_police *parm; 417 struct tc_police *parm;
416 418
@@ -423,149 +425,158 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
423 425
424 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); 426 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
425 427
426 if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) { 428 if (parm->index) {
427 p->refcnt++; 429 struct tcf_common *pc;
428 return p;
429 }
430 430
431 p = kzalloc(sizeof(*p), GFP_KERNEL); 431 pc = tcf_police_lookup(parm->index);
432 if (p == NULL) 432 if (pc) {
433 police = to_police(pc);
434 police->tcf_refcnt++;
435 return police;
436 }
437 }
438 police = kzalloc(sizeof(*police), GFP_KERNEL);
439 if (unlikely(!police))
433 return NULL; 440 return NULL;
434 441
435 p->refcnt = 1; 442 police->tcf_refcnt = 1;
436 spin_lock_init(&p->lock); 443 spin_lock_init(&police->tcf_lock);
437 p->stats_lock = &p->lock; 444 police->tcf_stats_lock = &police->tcf_lock;
438 if (parm->rate.rate) { 445 if (parm->rate.rate) {
439 p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); 446 police->tcfp_R_tab =
440 if (p->R_tab == NULL) 447 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
448 if (police->tcfp_R_tab == NULL)
441 goto failure; 449 goto failure;
442 if (parm->peakrate.rate) { 450 if (parm->peakrate.rate) {
443 p->P_tab = qdisc_get_rtab(&parm->peakrate, 451 police->tcfp_P_tab =
444 tb[TCA_POLICE_PEAKRATE-1]); 452 qdisc_get_rtab(&parm->peakrate,
445 if (p->P_tab == NULL) 453 tb[TCA_POLICE_PEAKRATE-1]);
454 if (police->tcfp_P_tab == NULL)
446 goto failure; 455 goto failure;
447 } 456 }
448 } 457 }
449 if (tb[TCA_POLICE_RESULT-1]) { 458 if (tb[TCA_POLICE_RESULT-1]) {
450 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32)) 459 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
451 goto failure; 460 goto failure;
452 p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 461 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
453 } 462 }
454#ifdef CONFIG_NET_ESTIMATOR 463#ifdef CONFIG_NET_ESTIMATOR
455 if (tb[TCA_POLICE_AVRATE-1]) { 464 if (tb[TCA_POLICE_AVRATE-1]) {
456 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32)) 465 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
457 goto failure; 466 goto failure;
458 p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 467 police->tcfp_ewma_rate =
468 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
459 } 469 }
460#endif 470#endif
461 p->toks = p->burst = parm->burst; 471 police->tcfp_toks = police->tcfp_burst = parm->burst;
462 p->mtu = parm->mtu; 472 police->tcfp_mtu = parm->mtu;
463 if (p->mtu == 0) { 473 if (police->tcfp_mtu == 0) {
464 p->mtu = ~0; 474 police->tcfp_mtu = ~0;
465 if (p->R_tab) 475 if (police->tcfp_R_tab)
466 p->mtu = 255<<p->R_tab->rate.cell_log; 476 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
467 } 477 }
468 if (p->P_tab) 478 if (police->tcfp_P_tab)
469 p->ptoks = L2T_P(p, p->mtu); 479 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
470 PSCHED_GET_TIME(p->t_c); 480 PSCHED_GET_TIME(police->tcfp_t_c);
471 p->index = parm->index ? : tcf_police_new_index(); 481 police->tcf_index = parm->index ? parm->index :
472 p->action = parm->action; 482 tcf_police_new_index();
483 police->tcf_action = parm->action;
473#ifdef CONFIG_NET_ESTIMATOR 484#ifdef CONFIG_NET_ESTIMATOR
474 if (est) 485 if (est)
475 gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est); 486 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
487 police->tcf_stats_lock, est);
476#endif 488#endif
477 h = tcf_police_hash(p->index); 489 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
478 write_lock_bh(&police_lock); 490 write_lock_bh(&police_lock);
479 p->next = tcf_police_ht[h]; 491 police->tcf_next = tcf_police_ht[h];
480 tcf_police_ht[h] = p; 492 tcf_police_ht[h] = &police->common;
481 write_unlock_bh(&police_lock); 493 write_unlock_bh(&police_lock);
482 return p; 494 return police;
483 495
484failure: 496failure:
485 if (p->R_tab) 497 if (police->tcfp_R_tab)
486 qdisc_put_rtab(p->R_tab); 498 qdisc_put_rtab(police->tcfp_R_tab);
487 kfree(p); 499 kfree(police);
488 return NULL; 500 return NULL;
489} 501}
490 502
491int tcf_police(struct sk_buff *skb, struct tcf_police *p) 503int tcf_police(struct sk_buff *skb, struct tcf_police *police)
492{ 504{
493 psched_time_t now; 505 psched_time_t now;
494 long toks; 506 long toks;
495 long ptoks = 0; 507 long ptoks = 0;
496 508
497 spin_lock(&p->lock); 509 spin_lock(&police->tcf_lock);
498 510
499 p->bstats.bytes += skb->len; 511 police->tcf_bstats.bytes += skb->len;
500 p->bstats.packets++; 512 police->tcf_bstats.packets++;
501 513
502#ifdef CONFIG_NET_ESTIMATOR 514#ifdef CONFIG_NET_ESTIMATOR
503 if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) { 515 if (police->tcfp_ewma_rate &&
504 p->qstats.overlimits++; 516 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
505 spin_unlock(&p->lock); 517 police->tcf_qstats.overlimits++;
506 return p->action; 518 spin_unlock(&police->tcf_lock);
519 return police->tcf_action;
507 } 520 }
508#endif 521#endif
509 522 if (skb->len <= police->tcfp_mtu) {
510 if (skb->len <= p->mtu) { 523 if (police->tcfp_R_tab == NULL) {
511 if (p->R_tab == NULL) { 524 spin_unlock(&police->tcf_lock);
512 spin_unlock(&p->lock); 525 return police->tcfp_result;
513 return p->result;
514 } 526 }
515 527
516 PSCHED_GET_TIME(now); 528 PSCHED_GET_TIME(now);
517 529 toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
518 toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst); 530 police->tcfp_burst);
519 531 if (police->tcfp_P_tab) {
520 if (p->P_tab) { 532 ptoks = toks + police->tcfp_ptoks;
521 ptoks = toks + p->ptoks; 533 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
522 if (ptoks > (long)L2T_P(p, p->mtu)) 534 ptoks = (long)L2T_P(police, police->tcfp_mtu);
523 ptoks = (long)L2T_P(p, p->mtu); 535 ptoks -= L2T_P(police, skb->len);
524 ptoks -= L2T_P(p, skb->len);
525 } 536 }
526 toks += p->toks; 537 toks += police->tcfp_toks;
527 if (toks > (long)p->burst) 538 if (toks > (long)police->tcfp_burst)
528 toks = p->burst; 539 toks = police->tcfp_burst;
529 toks -= L2T(p, skb->len); 540 toks -= L2T(police, skb->len);
530
531 if ((toks|ptoks) >= 0) { 541 if ((toks|ptoks) >= 0) {
532 p->t_c = now; 542 police->tcfp_t_c = now;
533 p->toks = toks; 543 police->tcfp_toks = toks;
534 p->ptoks = ptoks; 544 police->tcfp_ptoks = ptoks;
535 spin_unlock(&p->lock); 545 spin_unlock(&police->tcf_lock);
536 return p->result; 546 return police->tcfp_result;
537 } 547 }
538 } 548 }
539 549
540 p->qstats.overlimits++; 550 police->tcf_qstats.overlimits++;
541 spin_unlock(&p->lock); 551 spin_unlock(&police->tcf_lock);
542 return p->action; 552 return police->tcf_action;
543} 553}
544EXPORT_SYMBOL(tcf_police); 554EXPORT_SYMBOL(tcf_police);
545 555
546int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) 556int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
547{ 557{
548 unsigned char *b = skb->tail; 558 unsigned char *b = skb->tail;
549 struct tc_police opt; 559 struct tc_police opt;
550 560
551 opt.index = p->index; 561 opt.index = police->tcf_index;
552 opt.action = p->action; 562 opt.action = police->tcf_action;
553 opt.mtu = p->mtu; 563 opt.mtu = police->tcfp_mtu;
554 opt.burst = p->burst; 564 opt.burst = police->tcfp_burst;
555 if (p->R_tab) 565 if (police->tcfp_R_tab)
556 opt.rate = p->R_tab->rate; 566 opt.rate = police->tcfp_R_tab->rate;
557 else 567 else
558 memset(&opt.rate, 0, sizeof(opt.rate)); 568 memset(&opt.rate, 0, sizeof(opt.rate));
559 if (p->P_tab) 569 if (police->tcfp_P_tab)
560 opt.peakrate = p->P_tab->rate; 570 opt.peakrate = police->tcfp_P_tab->rate;
561 else 571 else
562 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 572 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
563 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 573 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
564 if (p->result) 574 if (police->tcfp_result)
565 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result); 575 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
576 &police->tcfp_result);
566#ifdef CONFIG_NET_ESTIMATOR 577#ifdef CONFIG_NET_ESTIMATOR
567 if (p->ewma_rate) 578 if (police->tcfp_ewma_rate)
568 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate); 579 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
569#endif 580#endif
570 return skb->len; 581 return skb->len;
571 582
@@ -574,19 +585,20 @@ rtattr_failure:
574 return -1; 585 return -1;
575} 586}
576 587
577int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p) 588int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
578{ 589{
579 struct gnet_dump d; 590 struct gnet_dump d;
580 591
581 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 592 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
582 TCA_XSTATS, p->stats_lock, &d) < 0) 593 TCA_XSTATS, police->tcf_stats_lock,
594 &d) < 0)
583 goto errout; 595 goto errout;
584 596
585 if (gnet_stats_copy_basic(&d, &p->bstats) < 0 || 597 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
586#ifdef CONFIG_NET_ESTIMATOR 598#ifdef CONFIG_NET_ESTIMATOR
587 gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 || 599 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
588#endif 600#endif
589 gnet_stats_copy_queue(&d, &p->qstats) < 0) 601 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
590 goto errout; 602 goto errout;
591 603
592 if (gnet_stats_finish_copy(&d) < 0) 604 if (gnet_stats_finish_copy(&d) < 0)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 17105c82537f..8c1ab8ad8fa6 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -20,54 +20,175 @@
20 20
21#define TCA_ACT_SIMP 22 21#define TCA_ACT_SIMP 22
22 22
23/* XXX: Hide all these common elements under some macro
24 * probably
25*/
26#include <linux/tc_act/tc_defact.h> 23#include <linux/tc_act/tc_defact.h>
27#include <net/tc_act/tc_defact.h> 24#include <net/tc_act/tc_defact.h>
28 25
29/* use generic hash table with 8 buckets */ 26#define SIMP_TAB_MASK 7
30#define MY_TAB_SIZE 8 27static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1];
31#define MY_TAB_MASK (MY_TAB_SIZE - 1) 28static u32 simp_idx_gen;
32static u32 idx_gen;
33static struct tcf_defact *tcf_simp_ht[MY_TAB_SIZE];
34static DEFINE_RWLOCK(simp_lock); 29static DEFINE_RWLOCK(simp_lock);
35 30
36/* override the defaults */ 31struct tcf_hashinfo simp_hash_info = {
37#define tcf_st tcf_defact 32 .htab = tcf_simp_ht,
38#define tc_st tc_defact 33 .hmask = SIMP_TAB_MASK,
39#define tcf_t_lock simp_lock 34 .lock = &simp_lock,
40#define tcf_ht tcf_simp_ht 35};
41
42#define CONFIG_NET_ACT_INIT 1
43#include <net/pkt_act.h>
44#include <net/act_generic.h>
45 36
46static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 37static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
47{ 38{
48 struct tcf_defact *p = PRIV(a, defact); 39 struct tcf_defact *d = a->priv;
49 40
50 spin_lock(&p->lock); 41 spin_lock(&d->tcf_lock);
51 p->tm.lastuse = jiffies; 42 d->tcf_tm.lastuse = jiffies;
52 p->bstats.bytes += skb->len; 43 d->tcf_bstats.bytes += skb->len;
53 p->bstats.packets++; 44 d->tcf_bstats.packets++;
54 45
55 /* print policy string followed by _ then packet count 46 /* print policy string followed by _ then packet count
56 * Example if this was the 3rd packet and the string was "hello" 47 * Example if this was the 3rd packet and the string was "hello"
57 * then it would look like "hello_3" (without quotes) 48 * then it would look like "hello_3" (without quotes)
58 **/ 49 **/
59 printk("simple: %s_%d\n", (char *)p->defdata, p->bstats.packets); 50 printk("simple: %s_%d\n",
60 spin_unlock(&p->lock); 51 (char *)d->tcfd_defdata, d->tcf_bstats.packets);
61 return p->action; 52 spin_unlock(&d->tcf_lock);
53 return d->tcf_action;
54}
55
56static int tcf_simp_release(struct tcf_defact *d, int bind)
57{
58 int ret = 0;
59 if (d) {
60 if (bind)
61 d->tcf_bindcnt--;
62 d->tcf_refcnt--;
63 if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) {
64 kfree(d->tcfd_defdata);
65 tcf_hash_destroy(&d->common, &simp_hash_info);
66 ret = 1;
67 }
68 }
69 return ret;
70}
71
72static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
73{
74 d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL);
75 if (unlikely(!d->tcfd_defdata))
76 return -ENOMEM;
77 d->tcfd_datalen = datalen;
78 memcpy(d->tcfd_defdata, defdata, datalen);
79 return 0;
80}
81
82static int realloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
83{
84 kfree(d->tcfd_defdata);
85 return alloc_defdata(d, datalen, defdata);
86}
87
88static int tcf_simp_init(struct rtattr *rta, struct rtattr *est,
89 struct tc_action *a, int ovr, int bind)
90{
91 struct rtattr *tb[TCA_DEF_MAX];
92 struct tc_defact *parm;
93 struct tcf_defact *d;
94 struct tcf_common *pc;
95 void *defdata;
96 u32 datalen = 0;
97 int ret = 0;
98
99 if (rta == NULL || rtattr_parse_nested(tb, TCA_DEF_MAX, rta) < 0)
100 return -EINVAL;
101
102 if (tb[TCA_DEF_PARMS - 1] == NULL ||
103 RTA_PAYLOAD(tb[TCA_DEF_PARMS - 1]) < sizeof(*parm))
104 return -EINVAL;
105
106 parm = RTA_DATA(tb[TCA_DEF_PARMS - 1]);
107 defdata = RTA_DATA(tb[TCA_DEF_DATA - 1]);
108 if (defdata == NULL)
109 return -EINVAL;
110
111 datalen = RTA_PAYLOAD(tb[TCA_DEF_DATA - 1]);
112 if (datalen <= 0)
113 return -EINVAL;
114
115 pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info);
116 if (!pc) {
117 pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
118 &simp_idx_gen, &simp_hash_info);
119 if (unlikely(!pc))
120 return -ENOMEM;
121
122 d = to_defact(pc);
123 ret = alloc_defdata(d, datalen, defdata);
124 if (ret < 0) {
125 kfree(pc);
126 return ret;
127 }
128 ret = ACT_P_CREATED;
129 } else {
130 d = to_defact(pc);
131 if (!ovr) {
132 tcf_simp_release(d, bind);
133 return -EEXIST;
134 }
135 realloc_defdata(d, datalen, defdata);
136 }
137
138 spin_lock_bh(&d->tcf_lock);
139 d->tcf_action = parm->action;
140 spin_unlock_bh(&d->tcf_lock);
141
142 if (ret == ACT_P_CREATED)
143 tcf_hash_insert(pc, &simp_hash_info);
144 return ret;
145}
146
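tcf_simp_init() above follows the create-or-reuse shape the new common helpers encourage: tcf_hash_check() for an existing action, tcf_hash_create() plus alloc_defdata() for a new one, tcf_hash_insert() only after setup has succeeded, and -EEXIST when the action already exists but no override was requested. The sketch below is a toy userspace model of that control flow only; the single-slot table, helper bodies and return codes are inventions, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct defact {
        unsigned int index;
        char        *defdata;
};

static struct defact *table;    /* pretend hash table with a single slot */

static char *dupstr(const char *s)
{
        char *p = malloc(strlen(s) + 1);
        if (p)
                strcpy(p, s);
        return p;
}

static int simp_init(unsigned int index, const char *data, int ovr)
{
        struct defact *d = (table && table->index == index) ? table : NULL;

        if (!d) {                               /* nothing found: create it            */
                d = calloc(1, sizeof(*d));      /* plays the role of tcf_hash_create() */
                if (!d)
                        return -1;              /* -ENOMEM in the kernel                */
                d->index   = index;
                d->defdata = dupstr(data);      /* alloc_defdata() equivalent           */
                table = d;                      /* tcf_hash_insert(): publish last      */
                return 1;                       /* ACT_P_CREATED                        */
        }
        if (!ovr)
                return -2;                      /* -EEXIST: exists, no override asked   */
        free(d->defdata);                       /* realloc_defdata() equivalent         */
        d->defdata = dupstr(data);
        return 0;                               /* existing action updated              */
}

int main(void)
{
        printf("create:   %d\n", simp_init(1, "hello", 0));
        printf("no ovr:   %d\n", simp_init(1, "world", 0));
        printf("override: %d\n", simp_init(1, "world", 1));
        return 0;
}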
147static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
148{
149 struct tcf_defact *d = a->priv;
150
151 if (d)
152 return tcf_simp_release(d, bind);
153 return 0;
154}
155
156static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
157 int bind, int ref)
158{
159 unsigned char *b = skb->tail;
160 struct tcf_defact *d = a->priv;
161 struct tc_defact opt;
162 struct tcf_t t;
163
164 opt.index = d->tcf_index;
165 opt.refcnt = d->tcf_refcnt - ref;
166 opt.bindcnt = d->tcf_bindcnt - bind;
167 opt.action = d->tcf_action;
168 RTA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
169 RTA_PUT(skb, TCA_DEF_DATA, d->tcfd_datalen, d->tcfd_defdata);
170 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
171 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
172 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
173 RTA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
174 return skb->len;
175
176rtattr_failure:
177 skb_trim(skb, b - skb->data);
178 return -1;
62} 179}
63 180
64static struct tc_action_ops act_simp_ops = { 181static struct tc_action_ops act_simp_ops = {
65 .kind = "simple", 182 .kind = "simple",
66 .type = TCA_ACT_SIMP, 183 .hinfo = &simp_hash_info,
67 .capab = TCA_CAP_NONE, 184 .type = TCA_ACT_SIMP,
68 .owner = THIS_MODULE, 185 .capab = TCA_CAP_NONE,
69 .act = tcf_simp, 186 .owner = THIS_MODULE,
70 tca_use_default_ops 187 .act = tcf_simp,
188 .dump = tcf_simp_dump,
189 .cleanup = tcf_simp_cleanup,
190 .init = tcf_simp_init,
191 .walk = tcf_generic_walker,
71}; 192};
72 193
73MODULE_AUTHOR("Jamal Hadi Salim(2005)"); 194MODULE_AUTHOR("Jamal Hadi Salim(2005)");