author		David S. Miller <davem@sunset.davemloft.net>	2006-08-22 02:54:55 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 17:55:10 -0400
commit		e9ce1cd3cf6cf35b21d0ce990f2e738f35907386 (patch)
tree		22a3ee7b78ae7cbf00520c66dcc389d87740069c /net/sched/act_api.c
parent		2e4ca75b31b6851dcc036c2cdebf3ecfe279a653 (diff)
[PKT_SCHED]: Kill pkt_act.h inlining.
This was simply making templates of functions and mostly causing a lot
of code duplication in the classifier action modules.
We solve this more cleanly by having a common "struct tcf_common" that
the hash worker functions, contained once in act_api.c, can work with.
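As a rough illustration of that split (an editor's sketch, not part of the commit message): the snippet below shows how an action module's setup path could use the shared hash helpers this patch adds to act_api.c. The "foo" action, struct tcf_foo, foo_hash_info, foo_idx_gen and the simplified init signature are hypothetical stand-ins; only the tcf_hash_*() helpers, struct tcf_hashinfo's htab/hmask/lock fields, and ACT_P_CREATED come from the code in this patch.

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <net/act_api.h>

/* Hypothetical "foo" action: per-module hash state that the shared
 * helpers in act_api.c walk (htab, hmask and lock are the tcf_hashinfo
 * fields the new act_api.c code dereferences). */
static struct tcf_common *foo_ht[16];
static u32 foo_idx_gen;
static DEFINE_RWLOCK(foo_lock);
static struct tcf_hashinfo foo_hash_info = {
	.htab	= foo_ht,
	.hmask	= 15,
	.lock	= &foo_lock,
};

/* Module object: the shared bookkeeping plus module-specific members. */
struct tcf_foo {
	struct tcf_common	common;
	u32			foo_param;
};

/* Simplified, hypothetical init-style path built on the new helpers:
 * look up an existing entry (binding to it if asked), otherwise
 * allocate a fresh one and link it into the hash. */
static int foo_init(struct rtattr *est, struct tc_action *a,
		    u32 index, int bind)
{
	struct tcf_common *pc;

	pc = tcf_hash_check(index, a, bind, &foo_hash_info);
	if (pc == NULL) {
		pc = tcf_hash_create(index, est, a, sizeof(struct tcf_foo),
				     bind, &foo_idx_gen, &foo_hash_info);
		if (unlikely(pc == NULL))
			return -ENOMEM;
		/* ... fill in the foo-specific members here ... */
		tcf_hash_insert(pc, &foo_hash_info);
		return ACT_P_CREATED;
	}
	return 0;
}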
Callers work with real action objects that have the common struct
plus their module specific struct members. You go from a common
object to the higher level one using a "to_foo()" macro which makes
use of container_of() to do the dirty work.
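To make the "to_foo()" conversion concrete, here is a minimal sketch of the macro pattern described above; struct tcf_foo, foo_param and foo_set_param() are invented for illustration, while struct tcf_common and container_of() are real.

#include <linux/kernel.h>	/* container_of() */
#include <net/act_api.h>	/* struct tcf_common */

/* Hypothetical module-private action object: the generic state lives
 * in the embedded struct tcf_common, the module's own fields follow. */
struct tcf_foo {
	struct tcf_common	common;
	u32			foo_param;
};
#define to_foo(pc) container_of(pc, struct tcf_foo, common)

/* Given the struct tcf_common pointer that the act_api.c helpers hand
 * back, recover the enclosing module object and touch its members. */
static void foo_set_param(struct tcf_common *pc, u32 val)
{
	struct tcf_foo *f = to_foo(pc);

	f->foo_param = val;
}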
This also kills off act_generic.h, which was only used by act_simple.c;
keeping it around was more work than it was worth.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/act_api.c')
-rw-r--r--	net/sched/act_api.c	246
1 files changed, 227 insertions, 19 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6990747d6d5a..835070e9169c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -33,16 +33,230 @@
 #include <net/sch_generic.h>
 #include <net/act_api.h>
 
-#if 0 /* control */
-#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args)
-#else
-#define DPRINTK(format, args...)
-#endif
-#if 0 /* data */
-#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args)
-#else
-#define D2PRINTK(format, args...)
-#endif
+void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+{
+	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
+	struct tcf_common **p1p;
+
+	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
+		if (*p1p == p) {
+			write_lock_bh(hinfo->lock);
+			*p1p = p->tcfc_next;
+			write_unlock_bh(hinfo->lock);
+#ifdef CONFIG_NET_ESTIMATOR
+			gen_kill_estimator(&p->tcfc_bstats,
+					   &p->tcfc_rate_est);
+#endif
+			kfree(p);
+			return;
+		}
+	}
+	BUG_TRAP(0);
+}
+EXPORT_SYMBOL(tcf_hash_destroy);
+
+int tcf_hash_release(struct tcf_common *p, int bind,
+		     struct tcf_hashinfo *hinfo)
+{
+	int ret = 0;
+
+	if (p) {
+		if (bind)
+			p->tcfc_bindcnt--;
+
+		p->tcfc_refcnt--;
+		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
+			tcf_hash_destroy(p, hinfo);
+			ret = 1;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(tcf_hash_release);
+
+static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct tc_action *a, struct tcf_hashinfo *hinfo)
+{
+	struct tcf_common *p;
+	int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+	struct rtattr *r ;
+
+	read_lock(hinfo->lock);
+
+	s_i = cb->args[0];
+
+	for (i = 0; i < (hinfo->hmask + 1); i++) {
+		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+
+		for (; p; p = p->tcfc_next) {
+			index++;
+			if (index < s_i)
+				continue;
+			a->priv = p;
+			a->order = n_i;
+			r = (struct rtattr*) skb->tail;
+			RTA_PUT(skb, a->order, 0, NULL);
+			err = tcf_action_dump_1(skb, a, 0, 0);
+			if (err < 0) {
+				index--;
+				skb_trim(skb, (u8*)r - skb->data);
+				goto done;
+			}
+			r->rta_len = skb->tail - (u8*)r;
+			n_i++;
+			if (n_i >= TCA_ACT_MAX_PRIO)
+				goto done;
+		}
+	}
+done:
+	read_unlock(hinfo->lock);
+	if (n_i)
+		cb->args[0] += n_i;
+	return n_i;
+
+rtattr_failure:
+	skb_trim(skb, (u8*)r - skb->data);
+	goto done;
+}
+
+static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
+			  struct tcf_hashinfo *hinfo)
+{
+	struct tcf_common *p, *s_p;
+	struct rtattr *r ;
+	int i= 0, n_i = 0;
+
+	r = (struct rtattr*) skb->tail;
+	RTA_PUT(skb, a->order, 0, NULL);
+	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
+	for (i = 0; i < (hinfo->hmask + 1); i++) {
+		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+
+		while (p != NULL) {
+			s_p = p->tcfc_next;
+			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
+				module_put(a->ops->owner);
+			n_i++;
+			p = s_p;
+		}
+	}
+	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
+	r->rta_len = skb->tail - (u8*)r;
+
+	return n_i;
+rtattr_failure:
+	skb_trim(skb, (u8*)r - skb->data);
+	return -EINVAL;
+}
+
+int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+		       int type, struct tc_action *a)
+{
+	struct tcf_hashinfo *hinfo = a->ops->hinfo;
+
+	if (type == RTM_DELACTION) {
+		return tcf_del_walker(skb, a, hinfo);
+	} else if (type == RTM_GETACTION) {
+		return tcf_dump_walker(skb, cb, a, hinfo);
+	} else {
+		printk("tcf_generic_walker: unknown action %d\n", type);
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(tcf_generic_walker);
+
+struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
+{
+	struct tcf_common *p;
+
+	read_lock(hinfo->lock);
+	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
+	     p = p->tcfc_next) {
+		if (p->tcfc_index == index)
+			break;
+	}
+	read_unlock(hinfo->lock);
+
+	return p;
+}
+EXPORT_SYMBOL(tcf_hash_lookup);
+
+u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
+{
+	u32 val = *idx_gen;
+
+	do {
+		if (++val == 0)
+			val = 1;
+	} while (tcf_hash_lookup(val, hinfo));
+
+	return (*idx_gen = val);
+}
+EXPORT_SYMBOL(tcf_hash_new_index);
+
+int tcf_hash_search(struct tc_action *a, u32 index)
+{
+	struct tcf_hashinfo *hinfo = a->ops->hinfo;
+	struct tcf_common *p = tcf_hash_lookup(index, hinfo);
+
+	if (p) {
+		a->priv = p;
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tcf_hash_search);
+
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
+				  struct tcf_hashinfo *hinfo)
+{
+	struct tcf_common *p = NULL;
+	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
+		if (bind) {
+			p->tcfc_bindcnt++;
+			p->tcfc_refcnt++;
+		}
+		a->priv = p;
+	}
+	return p;
+}
+EXPORT_SYMBOL(tcf_hash_check);
+
+struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo)
+{
+	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+
+	if (unlikely(!p))
+		return p;
+	p->tcfc_refcnt = 1;
+	if (bind)
+		p->tcfc_bindcnt = 1;
+
+	spin_lock_init(&p->tcfc_lock);
+	p->tcfc_stats_lock = &p->tcfc_lock;
+	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
+	p->tcfc_tm.install = jiffies;
+	p->tcfc_tm.lastuse = jiffies;
+#ifdef CONFIG_NET_ESTIMATOR
+	if (est)
+		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
+				  p->tcfc_stats_lock, est);
+#endif
+	a->priv = (void *) p;
+	return p;
+}
+EXPORT_SYMBOL(tcf_hash_create);
+
+void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+{
+	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
+
+	write_lock_bh(hinfo->lock);
+	p->tcfc_next = hinfo->htab[h];
+	hinfo->htab[h] = p;
+	write_unlock_bh(hinfo->lock);
+}
+EXPORT_SYMBOL(tcf_hash_insert);
 
 static struct tc_action_ops *act_base = NULL;
 static DEFINE_RWLOCK(act_mod_lock);
@@ -155,9 +369,6 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
 
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-		D2PRINTK("(%p)tcf_action_exec: cleared TC_NCLS in %s out %s\n",
-			skb, skb->input_dev ? skb->input_dev->name : "xxx",
-			skb->dev->name);
 		ret = TC_ACT_OK;
 		goto exec_done;
 	}
@@ -187,8 +398,6 @@ void tcf_action_destroy(struct tc_action *act, int bind)
 
 	for (a = act; a; a = act) {
 		if (a->ops && a->ops->cleanup) {
-			DPRINTK("tcf_action_destroy destroying %p next %p\n",
-				a, a->next);
 			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
 				module_put(a->ops->owner);
 			act = act->next;
@@ -331,7 +540,6 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
 	if (*err != ACT_P_CREATED)
 		module_put(a_o->owner);
 	a->ops = a_o;
-	DPRINTK("tcf_action_init_1: successfull %s\n", act_name);
 
 	*err = 0;
 	return a;
@@ -392,12 +600,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 	if (compat_mode) {
 		if (a->type == TCA_OLD_COMPAT)
 			err = gnet_stats_start_copy_compat(skb, 0,
-				TCA_STATS, TCA_XSTATS, h->stats_lock, &d);
+				TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d);
 		else
 			return 0;
 	} else
 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
-					    h->stats_lock, &d);
+					    h->tcf_stats_lock, &d);
 
 	if (err < 0)
 		goto errout;
@@ -406,11 +614,11 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 		if (a->ops->get_stats(skb, a) < 0)
 			goto errout;
 
-	if (gnet_stats_copy_basic(&d, &h->bstats) < 0 ||
+	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
-	    gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
 #endif
-	    gnet_stats_copy_queue(&d, &h->qstats) < 0)
+	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
 		goto errout;
 
 	if (gnet_stats_finish_copy(&d) < 0)