Diffstat (limited to 'net')
-rw-r--r--	net/sched/act_api.c	139
-rw-r--r--	net/sched/act_police.c	10
2 files changed, 72 insertions, 77 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d97419f35e7e..e4a5f2607ffa 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -29,46 +29,43 @@
 
 static void free_tcf(struct rcu_head *head)
 {
-	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+	struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu);
 
 	free_percpu(p->cpu_bstats);
 	free_percpu(p->cpu_qstats);
 	kfree(p);
 }
 
-static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *a)
+static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *p)
 {
-	struct tcf_common *p = (struct tcf_common *)a;
-
 	spin_lock_bh(&hinfo->lock);
-	hlist_del(&p->tcfc_head);
+	hlist_del(&p->tcfa_head);
 	spin_unlock_bh(&hinfo->lock);
-	gen_kill_estimator(&p->tcfc_bstats,
-			   &p->tcfc_rate_est);
+	gen_kill_estimator(&p->tcfa_bstats,
+			   &p->tcfa_rate_est);
 	/*
-	 * gen_estimator est_timer() might access p->tcfc_lock
+	 * gen_estimator est_timer() might access p->tcfa_lock
 	 * or bstats, wait a RCU grace period before freeing p
 	 */
-	call_rcu(&p->tcfc_rcu, free_tcf);
+	call_rcu(&p->tcfa_rcu, free_tcf);
 }
 
-int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
+int __tcf_hash_release(struct tc_action *p, bool bind, bool strict)
 {
-	struct tcf_common *p = (struct tcf_common *)a;
 	int ret = 0;
 
 	if (p) {
 		if (bind)
-			p->tcfc_bindcnt--;
-		else if (strict && p->tcfc_bindcnt > 0)
+			p->tcfa_bindcnt--;
+		else if (strict && p->tcfa_bindcnt > 0)
 			return -EPERM;
 
-		p->tcfc_refcnt--;
-		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
-			if (a->ops->cleanup)
-				a->ops->cleanup(a, bind);
-			list_del(&a->list);
-			tcf_hash_destroy(a->hinfo, a);
+		p->tcfa_refcnt--;
+		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
+			if (p->ops->cleanup)
+				p->ops->cleanup(p, bind);
+			list_del(&p->list);
+			tcf_hash_destroy(p->hinfo, p);
 			ret = ACT_P_DELETED;
 		}
 	}
@@ -89,11 +86,11 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
 
 	for (i = 0; i < (hinfo->hmask + 1); i++) {
 		struct hlist_head *head;
-		struct tcf_common *p;
+		struct tc_action *p;
 
 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
 
-		hlist_for_each_entry_rcu(p, head, tcfc_head) {
+		hlist_for_each_entry_rcu(p, head, tcfa_head) {
 			index++;
 			if (index < s_i)
 				continue;
@@ -101,7 +98,7 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
 			nest = nla_nest_start(skb, n_i);
 			if (nest == NULL)
 				goto nla_put_failure;
-			err = tcf_action_dump_1(skb, (struct tc_action *)p, 0, 0);
+			err = tcf_action_dump_1(skb, p, 0, 0);
 			if (err < 0) {
 				index--;
 				nlmsg_trim(skb, nest);
@@ -139,13 +136,13 @@ static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
 	for (i = 0; i < (hinfo->hmask + 1); i++) {
 		struct hlist_head *head;
 		struct hlist_node *n;
-		struct tcf_common *p;
+		struct tc_action *p;
 
 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
-		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
-			ret = __tcf_hash_release((struct tc_action *)p, false, true);
+		hlist_for_each_entry_safe(p, n, head, tcfa_head) {
+			ret = __tcf_hash_release(p, false, true);
 			if (ret == ACT_P_DELETED) {
-				module_put(p->tcfc_act.ops->owner);
+				module_put(p->ops->owner);
 				n_i++;
 			} else if (ret < 0)
 				goto nla_put_failure;
@@ -178,15 +175,15 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);
 
-static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
+static struct tc_action *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
 {
-	struct tcf_common *p = NULL;
+	struct tc_action *p = NULL;
 	struct hlist_head *head;
 
 	spin_lock_bh(&hinfo->lock);
 	head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
-	hlist_for_each_entry_rcu(p, head, tcfc_head)
-		if (p->tcfc_index == index)
+	hlist_for_each_entry_rcu(p, head, tcfa_head)
+		if (p->tcfa_index == index)
 			break;
 	spin_unlock_bh(&hinfo->lock);
 
@@ -211,10 +208,10 @@ EXPORT_SYMBOL(tcf_hash_new_index);
 int tcf_hash_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
 	struct tcf_hashinfo *hinfo = tn->hinfo;
-	struct tcf_common *p = tcf_hash_lookup(index, hinfo);
+	struct tc_action *p = tcf_hash_lookup(index, hinfo);
 
 	if (p) {
-		*a = &p->tcfc_act;
+		*a = p;
 		return 1;
 	}
 	return 0;
@@ -225,12 +222,13 @@ bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
 		    int bind)
 {
 	struct tcf_hashinfo *hinfo = tn->hinfo;
-	struct tcf_common *p = NULL;
+	struct tc_action *p = NULL;
+
 	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
 		if (bind)
-			p->tcfc_bindcnt++;
-		p->tcfc_refcnt++;
-		*a = &p->tcfc_act;
+			p->tcfa_bindcnt++;
+		p->tcfa_refcnt++;
+		*a = p;
 		return true;
 	}
 	return false;
@@ -239,11 +237,10 @@ EXPORT_SYMBOL(tcf_hash_check);
 
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
 {
-	struct tcf_common *pc = (struct tcf_common *)a;
 	if (est)
-		gen_kill_estimator(&pc->tcfc_bstats,
-				   &pc->tcfc_rate_est);
-	call_rcu(&pc->tcfc_rcu, free_tcf);
+		gen_kill_estimator(&a->tcfa_bstats,
+				   &a->tcfa_rate_est);
+	call_rcu(&a->tcfa_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
@@ -251,15 +248,15 @@ int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 		   struct tc_action **a, const struct tc_action_ops *ops,
 		   int bind, bool cpustats)
 {
-	struct tcf_common *p = kzalloc(ops->size, GFP_KERNEL);
+	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
 	struct tcf_hashinfo *hinfo = tn->hinfo;
 	int err = -ENOMEM;
 
 	if (unlikely(!p))
 		return -ENOMEM;
-	p->tcfc_refcnt = 1;
+	p->tcfa_refcnt = 1;
 	if (bind)
-		p->tcfc_bindcnt = 1;
+		p->tcfa_bindcnt = 1;
 
 	if (cpustats) {
 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
@@ -275,38 +272,37 @@ err2:
 			goto err1;
 		}
 	}
-	spin_lock_init(&p->tcfc_lock);
-	INIT_HLIST_NODE(&p->tcfc_head);
-	p->tcfc_index = index ? index : tcf_hash_new_index(tn);
-	p->tcfc_tm.install = jiffies;
-	p->tcfc_tm.lastuse = jiffies;
-	p->tcfc_tm.firstuse = 0;
+	spin_lock_init(&p->tcfa_lock);
+	INIT_HLIST_NODE(&p->tcfa_head);
+	p->tcfa_index = index ? index : tcf_hash_new_index(tn);
+	p->tcfa_tm.install = jiffies;
+	p->tcfa_tm.lastuse = jiffies;
+	p->tcfa_tm.firstuse = 0;
 	if (est) {
-		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
-					&p->tcfc_rate_est,
-					&p->tcfc_lock, NULL, est);
+		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
+					&p->tcfa_rate_est,
+					&p->tcfa_lock, NULL, est);
 		if (err) {
 			free_percpu(p->cpu_qstats);
 			goto err2;
 		}
 	}
 
-	p->tcfc_act.hinfo = hinfo;
-	p->tcfc_act.ops = ops;
-	INIT_LIST_HEAD(&p->tcfc_act.list);
-	*a = &p->tcfc_act;
+	p->hinfo = hinfo;
+	p->ops = ops;
+	INIT_LIST_HEAD(&p->list);
+	*a = p;
 	return 0;
 }
 EXPORT_SYMBOL(tcf_hash_create);
 
 void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a)
 {
-	struct tcf_common *p = (struct tcf_common *)a;
 	struct tcf_hashinfo *hinfo = tn->hinfo;
-	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
+	unsigned int h = tcf_hash(a->tcfa_index, hinfo->hmask);
 
 	spin_lock_bh(&hinfo->lock);
-	hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
+	hlist_add_head(&a->tcfa_head, &hinfo->htab[h]);
 	spin_unlock_bh(&hinfo->lock);
 }
 EXPORT_SYMBOL(tcf_hash_insert);
@@ -317,13 +313,13 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
 	int i;
 
 	for (i = 0; i < hinfo->hmask + 1; i++) {
-		struct tcf_common *p;
+		struct tc_action *p;
 		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(p, n, &hinfo->htab[i], tcfc_head) {
+		hlist_for_each_entry_safe(p, n, &hinfo->htab[i], tcfa_head) {
 			int ret;
 
-			ret = __tcf_hash_release((struct tc_action *)p, false, true);
+			ret = __tcf_hash_release(p, false, true);
 			if (ret == ACT_P_DELETED)
 				module_put(ops->owner);
 			else if (ret < 0)
@@ -625,12 +621,11 @@ err:
 	return err;
 }
 
-int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
+int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
 			  int compat_mode)
 {
 	int err = 0;
 	struct gnet_dump d;
-	struct tcf_common *p = (struct tcf_common *)a;
 
 	if (p == NULL)
 		goto errout;
@@ -639,27 +634,27 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 	 * to add additional backward compatibility statistic TLVs.
 	 */
 	if (compat_mode) {
-		if (a->type == TCA_OLD_COMPAT)
+		if (p->type == TCA_OLD_COMPAT)
 			err = gnet_stats_start_copy_compat(skb, 0,
 							   TCA_STATS,
 							   TCA_XSTATS,
-							   &p->tcfc_lock, &d,
+							   &p->tcfa_lock, &d,
 							   TCA_PAD);
 		else
 			return 0;
 	} else
 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
-					    &p->tcfc_lock, &d, TCA_ACT_PAD);
+					    &p->tcfa_lock, &d, TCA_ACT_PAD);
 
 	if (err < 0)
 		goto errout;
 
-	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
-	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
-				     &p->tcfc_rate_est) < 0 ||
+	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
+	    gnet_stats_copy_rate_est(&d, &p->tcfa_bstats,
+				     &p->tcfa_rate_est) < 0 ||
 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
-				  &p->tcfc_qstats,
-				  p->tcfc_qstats.qlen) < 0)
+				  &p->tcfa_qstats,
+				  p->tcfa_qstats.qlen) < 0)
 		goto errout;
 
 	if (gnet_stats_finish_copy(&d) < 0)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 123794af55c3..b3c7e975fc9e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -23,7 +23,7 @@
 #include <net/netlink.h>
 
 struct tcf_police {
-	struct tcf_common	common;
+	struct tc_action	common;
 	int			tcfp_result;
 	u32			tcfp_ewma_rate;
 	s64			tcfp_burst;
@@ -73,11 +73,11 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
 
 	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
 		struct hlist_head *head;
-		struct tcf_common *p;
+		struct tc_action *p;
 
 		head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
 
-		hlist_for_each_entry_rcu(p, head, tcfc_head) {
+		hlist_for_each_entry_rcu(p, head, tcfa_head) {
 			index++;
 			if (index < s_i)
 				continue;
@@ -85,9 +85,9 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
 			if (nest == NULL)
 				goto nla_put_failure;
 			if (type == RTM_DELACTION)
-				err = tcf_action_dump_1(skb, (struct tc_action *)p, 0, 1);
+				err = tcf_action_dump_1(skb, p, 0, 1);
 			else
-				err = tcf_action_dump_1(skb, (struct tc_action *)p, 0, 0);
+				err = tcf_action_dump_1(skb, p, 0, 0);
 			if (err < 0) {
 				index--;
 				nla_nest_cancel(skb, nest);
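
The act_police hunks above show the layout this patch relies on throughout: each action's private struct embeds struct tc_action directly as its first member, so the generic code can pass struct tc_action pointers around without the old (struct tcf_common *) casts. Below is a minimal, self-contained C sketch of that layout; the trimmed-down struct contents, the to_police() helper, and the user-space container_of definition are illustrative assumptions, not taken from this patch.

#include <stddef.h>

/* User-space stand-in for the kernel macro: recover the enclosing struct
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the kernel structs; most fields omitted. */
struct tc_action {
	unsigned int	tcfa_index;
	int		tcfa_refcnt;
	int		tcfa_bindcnt;
};

struct tcf_police {
	struct tc_action	common;		/* kept as the first member */
	int			tcfp_result;
};

/* Hypothetical helper: because 'common' sits at offset 0, the generic
 * struct tc_action * and the private struct tcf_police * refer to the
 * same object; container_of() makes the conversion explicit. */
static inline struct tcf_police *to_police(struct tc_action *a)
{
	return container_of(a, struct tcf_police, common);
}

int main(void)
{
	struct tcf_police police = { .common = { .tcfa_index = 1 } };
	struct tc_action *a = &police.common;	/* what the core code handles */
	struct tcf_police *p = to_police(a);	/* back to the private struct */

	return p == &police ? 0 : 1;		/* same object, no extra cast */
}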