author     Eric Dumazet <eric.dumazet@gmail.com>    2011-01-19 14:26:56 -0500
committer  David S. Miller <davem@davemloft.net>    2011-01-20 02:31:12 -0500
commit     cc7ec456f82da7f89a5b376e613b3ac4311b3e9a (patch)
tree       534729db08c10f40c090261cdc191dd2303dfc5c /net/sched
parent     7180a03118cac7256fb04f929fe34d0aeee92c40 (diff)
net_sched: cleanups
Cleanup net/sched code to current CodingStyle and practices.
Reduce inline abuse
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
41 files changed, 842 insertions, 801 deletions
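Most of the change below is mechanical: spaces go around binary operators, after commas, and before the * in pointer casts; assignments move out of if () conditions; multi-line comments get a leading " * " on continuation lines; struct definitions open their brace on the declaration line; printk(KERN_INFO ...) and printk(KERN_ERR ...) become pr_info() and pr_err(); bare "unsigned" becomes "unsigned int"; and inline (or __inline__) comes off the larger static functions in the .c files. A condensed before/after sketch of these patterns, using made-up names rather than code taken from the patch:

/*
 * Illustrative only: struct sample, sample_release() and
 * sample_do_release() are hypothetical, not net/sched symbols.
 */
struct sample {                 /* was: "struct sample" and "{" on separate lines */
        int bindcnt;
        int refcnt;
};

static int sample_do_release(struct sample *s)
{
        return s->refcnt;       /* placeholder body for the sketch */
}

/* Old style being removed:
 *
 *      static inline int sample_release(struct sample *s, int bind)
 *      {
 *              ...
 *              if(!s->bindcnt && s->refcnt<=0) {
 *                      if ((err = sample_do_release(s)) < 0)
 *                              return err;
 *              }
 *              ...
 *      }
 */

/* New style: no inline in a .c file, spaces around operators and after
 * the if keyword, and the assignment pulled out of the condition.
 */
static int sample_release(struct sample *s, int bind)
{
        int err;

        if (bind)
                s->bindcnt--;
        s->refcnt--;
        if (!s->bindcnt && s->refcnt <= 0) {
                err = sample_do_release(s);
                if (err < 0)
                        return err;
        }
        return 0;
}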
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 23b25f89e7e0..15873e14cb54 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, | |||
78 | struct tc_action *a, struct tcf_hashinfo *hinfo) | 78 | struct tc_action *a, struct tcf_hashinfo *hinfo) |
79 | { | 79 | { |
80 | struct tcf_common *p; | 80 | struct tcf_common *p; |
81 | int err = 0, index = -1,i = 0, s_i = 0, n_i = 0; | 81 | int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; |
82 | struct nlattr *nest; | 82 | struct nlattr *nest; |
83 | 83 | ||
84 | read_lock_bh(hinfo->lock); | 84 | read_lock_bh(hinfo->lock); |
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, | |||
126 | { | 126 | { |
127 | struct tcf_common *p, *s_p; | 127 | struct tcf_common *p, *s_p; |
128 | struct nlattr *nest; | 128 | struct nlattr *nest; |
129 | int i= 0, n_i = 0; | 129 | int i = 0, n_i = 0; |
130 | 130 | ||
131 | nest = nla_nest_start(skb, a->order); | 131 | nest = nla_nest_start(skb, a->order); |
132 | if (nest == NULL) | 132 | if (nest == NULL) |
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, | |||
138 | while (p != NULL) { | 138 | while (p != NULL) { |
139 | s_p = p->tcfc_next; | 139 | s_p = p->tcfc_next; |
140 | if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) | 140 | if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) |
141 | module_put(a->ops->owner); | 141 | module_put(a->ops->owner); |
142 | n_i++; | 142 | n_i++; |
143 | p = s_p; | 143 | p = s_p; |
144 | } | 144 | } |
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
447 | nest = nla_nest_start(skb, TCA_OPTIONS); | 447 | nest = nla_nest_start(skb, TCA_OPTIONS); |
448 | if (nest == NULL) | 448 | if (nest == NULL) |
449 | goto nla_put_failure; | 449 | goto nla_put_failure; |
450 | if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { | 450 | err = tcf_action_dump_old(skb, a, bind, ref); |
451 | if (err > 0) { | ||
451 | nla_nest_end(skb, nest); | 452 | nla_nest_end(skb, nest); |
452 | return err; | 453 | return err; |
453 | } | 454 | } |
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, | |||
491 | struct tc_action *a; | 492 | struct tc_action *a; |
492 | struct tc_action_ops *a_o; | 493 | struct tc_action_ops *a_o; |
493 | char act_name[IFNAMSIZ]; | 494 | char act_name[IFNAMSIZ]; |
494 | struct nlattr *tb[TCA_ACT_MAX+1]; | 495 | struct nlattr *tb[TCA_ACT_MAX + 1]; |
495 | struct nlattr *kind; | 496 | struct nlattr *kind; |
496 | int err; | 497 | int err; |
497 | 498 | ||
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, | |||
549 | goto err_free; | 550 | goto err_free; |
550 | 551 | ||
551 | /* module count goes up only when brand new policy is created | 552 | /* module count goes up only when brand new policy is created |
552 | if it exists and is only bound to in a_o->init() then | 553 | * if it exists and is only bound to in a_o->init() then |
553 | ACT_P_CREATED is not returned (a zero is). | 554 | * ACT_P_CREATED is not returned (a zero is). |
554 | */ | 555 | */ |
555 | if (err != ACT_P_CREATED) | 556 | if (err != ACT_P_CREATED) |
556 | module_put(a_o->owner); | 557 | module_put(a_o->owner); |
557 | a->ops = a_o; | 558 | a->ops = a_o; |
@@ -569,7 +570,7 @@ err_out: | |||
569 | struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, | 570 | struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, |
570 | char *name, int ovr, int bind) | 571 | char *name, int ovr, int bind) |
571 | { | 572 | { |
572 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; | 573 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
573 | struct tc_action *head = NULL, *act, *act_prev = NULL; | 574 | struct tc_action *head = NULL, *act, *act_prev = NULL; |
574 | int err; | 575 | int err; |
575 | int i; | 576 | int i; |
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, | |||
697 | static struct tc_action * | 698 | static struct tc_action * |
698 | tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | 699 | tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) |
699 | { | 700 | { |
700 | struct nlattr *tb[TCA_ACT_MAX+1]; | 701 | struct nlattr *tb[TCA_ACT_MAX + 1]; |
701 | struct tc_action *a; | 702 | struct tc_action *a; |
702 | int index; | 703 | int index; |
703 | int err; | 704 | int err; |
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, | |||
770 | struct tcamsg *t; | 771 | struct tcamsg *t; |
771 | struct netlink_callback dcb; | 772 | struct netlink_callback dcb; |
772 | struct nlattr *nest; | 773 | struct nlattr *nest; |
773 | struct nlattr *tb[TCA_ACT_MAX+1]; | 774 | struct nlattr *tb[TCA_ACT_MAX + 1]; |
774 | struct nlattr *kind; | 775 | struct nlattr *kind; |
775 | struct tc_action *a = create_a(0); | 776 | struct tc_action *a = create_a(0); |
776 | int err = -ENOMEM; | 777 | int err = -ENOMEM; |
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, | |||
821 | nlh->nlmsg_flags |= NLM_F_ROOT; | 822 | nlh->nlmsg_flags |= NLM_F_ROOT; |
822 | module_put(a->ops->owner); | 823 | module_put(a->ops->owner); |
823 | kfree(a); | 824 | kfree(a); |
824 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 825 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
826 | n->nlmsg_flags & NLM_F_ECHO); | ||
825 | if (err > 0) | 827 | if (err > 0) |
826 | return 0; | 828 | return 0; |
827 | 829 | ||
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
842 | u32 pid, int event) | 844 | u32 pid, int event) |
843 | { | 845 | { |
844 | int i, ret; | 846 | int i, ret; |
845 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; | 847 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
846 | struct tc_action *head = NULL, *act, *act_prev = NULL; | 848 | struct tc_action *head = NULL, *act, *act_prev = NULL; |
847 | 849 | ||
848 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); | 850 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); |
849 | if (ret < 0) | 851 | if (ret < 0) |
850 | return ret; | 852 | return ret; |
851 | 853 | ||
852 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { | 854 | if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { |
853 | if (tb[1] != NULL) | 855 | if (tb[1] != NULL) |
854 | return tca_action_flush(net, tb[1], n, pid); | 856 | return tca_action_flush(net, tb[1], n, pid); |
855 | else | 857 | else |
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
892 | /* now do the delete */ | 894 | /* now do the delete */ |
893 | tcf_action_destroy(head, 0); | 895 | tcf_action_destroy(head, 0); |
894 | ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, | 896 | ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
895 | n->nlmsg_flags&NLM_F_ECHO); | 897 | n->nlmsg_flags & NLM_F_ECHO); |
896 | if (ret > 0) | 898 | if (ret > 0) |
897 | return 0; | 899 | return 0; |
898 | return ret; | 900 | return ret; |
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a, | |||
936 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 938 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
937 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; | 939 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; |
938 | 940 | ||
939 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); | 941 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO); |
940 | if (err > 0) | 942 | if (err > 0) |
941 | err = 0; | 943 | err = 0; |
942 | return err; | 944 | return err; |
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
967 | 969 | ||
968 | /* dump then free all the actions after update; inserted policy | 970 | /* dump then free all the actions after update; inserted policy |
969 | * stays intact | 971 | * stays intact |
970 | * */ | 972 | */ |
971 | ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); | 973 | ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); |
972 | for (a = act; a; a = act) { | 974 | for (a = act; a; a = act) { |
973 | act = a->next; | 975 | act = a->next; |
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
993 | return -EINVAL; | 995 | return -EINVAL; |
994 | } | 996 | } |
995 | 997 | ||
996 | /* n->nlmsg_flags&NLM_F_CREATE | 998 | /* n->nlmsg_flags & NLM_F_CREATE */ |
997 | * */ | ||
998 | switch (n->nlmsg_type) { | 999 | switch (n->nlmsg_type) { |
999 | case RTM_NEWACTION: | 1000 | case RTM_NEWACTION: |
1000 | /* we are going to assume all other flags | 1001 | /* we are going to assume all other flags |
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1003 | * but since we want avoid ambiguity (eg when flags | 1004 | * but since we want avoid ambiguity (eg when flags |
1004 | * is zero) then just set this | 1005 | * is zero) then just set this |
1005 | */ | 1006 | */ |
1006 | if (n->nlmsg_flags&NLM_F_REPLACE) | 1007 | if (n->nlmsg_flags & NLM_F_REPLACE) |
1007 | ovr = 1; | 1008 | ovr = 1; |
1008 | replay: | 1009 | replay: |
1009 | ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); | 1010 | ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); |
@@ -1028,7 +1029,7 @@ replay: | |||
1028 | static struct nlattr * | 1029 | static struct nlattr * |
1029 | find_dump_kind(const struct nlmsghdr *n) | 1030 | find_dump_kind(const struct nlmsghdr *n) |
1030 | { | 1031 | { |
1031 | struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; | 1032 | struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; |
1032 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; | 1033 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
1033 | struct nlattr *nla[TCAA_MAX + 1]; | 1034 | struct nlattr *nla[TCAA_MAX + 1]; |
1034 | struct nlattr *kind; | 1035 | struct nlattr *kind; |
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1071 | } | 1072 | } |
1072 | 1073 | ||
1073 | a_o = tc_lookup_action(kind); | 1074 | a_o = tc_lookup_action(kind); |
1074 | if (a_o == NULL) { | 1075 | if (a_o == NULL) |
1075 | return 0; | 1076 | return 0; |
1076 | } | ||
1077 | 1077 | ||
1078 | memset(&a, 0, sizeof(struct tc_action)); | 1078 | memset(&a, 0, sizeof(struct tc_action)); |
1079 | a.ops = a_o; | 1079 | a.ops = a_o; |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 83ddfc07e45d..6cdf9abe475f 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est, | |||
63 | if (nla == NULL) | 63 | if (nla == NULL) |
64 | return -EINVAL; | 64 | return -EINVAL; |
65 | 65 | ||
66 | err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy); | 66 | err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy); |
67 | if (err < 0) | 67 | if (err < 0) |
68 | return err; | 68 | return err; |
69 | 69 | ||
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index c2ed90a4c0b4..2b4ab4b05ce8 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | typedef int (*g_rand)(struct tcf_gact *gact); | 52 | typedef int (*g_rand)(struct tcf_gact *gact); |
53 | static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; | 53 | static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ }; |
54 | #endif /* CONFIG_GACT_PROB */ | 54 | #endif /* CONFIG_GACT_PROB */ |
55 | 55 | ||
56 | static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { | 56 | static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { |
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, | |||
89 | pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), | 89 | pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), |
90 | bind, &gact_idx_gen, &gact_hash_info); | 90 | bind, &gact_idx_gen, &gact_hash_info); |
91 | if (IS_ERR(pc)) | 91 | if (IS_ERR(pc)) |
92 | return PTR_ERR(pc); | 92 | return PTR_ERR(pc); |
93 | ret = ACT_P_CREATED; | 93 | ret = ACT_P_CREATED; |
94 | } else { | 94 | } else { |
95 | if (!ovr) { | 95 | if (!ovr) { |
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL"); | |||
205 | static int __init gact_init_module(void) | 205 | static int __init gact_init_module(void) |
206 | { | 206 | { |
207 | #ifdef CONFIG_GACT_PROB | 207 | #ifdef CONFIG_GACT_PROB |
208 | printk(KERN_INFO "GACT probability on\n"); | 208 | pr_info("GACT probability on\n"); |
209 | #else | 209 | #else |
210 | printk(KERN_INFO "GACT probability NOT on\n"); | 210 | pr_info("GACT probability NOT on\n"); |
211 | #endif | 211 | #endif |
212 | return tcf_register_action(&act_gact_ops); | 212 | return tcf_register_action(&act_gact_ops); |
213 | } | 213 | } |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c2a7c20e81c1..9fc211a1b20e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, | |||
138 | pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, | 138 | pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, |
139 | &ipt_idx_gen, &ipt_hash_info); | 139 | &ipt_idx_gen, &ipt_hash_info); |
140 | if (IS_ERR(pc)) | 140 | if (IS_ERR(pc)) |
141 | return PTR_ERR(pc); | 141 | return PTR_ERR(pc); |
142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
143 | } else { | 143 | } else { |
144 | if (!ovr) { | 144 | if (!ovr) { |
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, | |||
162 | if (unlikely(!t)) | 162 | if (unlikely(!t)) |
163 | goto err2; | 163 | goto err2; |
164 | 164 | ||
165 | if ((err = ipt_init_target(t, tname, hook)) < 0) | 165 | err = ipt_init_target(t, tname, hook); |
166 | if (err < 0) | ||
166 | goto err3; | 167 | goto err3; |
167 | 168 | ||
168 | spin_lock_bh(&ipt->tcf_lock); | 169 | spin_lock_bh(&ipt->tcf_lock); |
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
212 | bstats_update(&ipt->tcf_bstats, skb); | 213 | bstats_update(&ipt->tcf_bstats, skb); |
213 | 214 | ||
214 | /* yes, we have to worry about both in and out dev | 215 | /* yes, we have to worry about both in and out dev |
215 | worry later - danger - this API seems to have changed | 216 | * worry later - danger - this API seems to have changed |
216 | from earlier kernels */ | 217 | * from earlier kernels |
218 | */ | ||
217 | par.in = skb->dev; | 219 | par.in = skb->dev; |
218 | par.out = NULL; | 220 | par.out = NULL; |
219 | par.hooknum = ipt->tcfi_hook; | 221 | par.hooknum = ipt->tcfi_hook; |
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int | |||
253 | struct tc_cnt c; | 255 | struct tc_cnt c; |
254 | 256 | ||
255 | /* for simple targets kernel size == user size | 257 | /* for simple targets kernel size == user size |
256 | ** user name = target name | 258 | * user name = target name |
257 | ** for foolproof you need to not assume this | 259 | * for foolproof you need to not assume this |
258 | */ | 260 | */ |
259 | 261 | ||
260 | t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); | 262 | t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); |
261 | if (unlikely(!t)) | 263 | if (unlikely(!t)) |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d765067e99db..961386e2f2c0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = { | |||
41 | .lock = &mirred_lock, | 41 | .lock = &mirred_lock, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) | 44 | static int tcf_mirred_release(struct tcf_mirred *m, int bind) |
45 | { | 45 | { |
46 | if (m) { | 46 | if (m) { |
47 | if (bind) | 47 | if (bind) |
48 | m->tcf_bindcnt--; | 48 | m->tcf_bindcnt--; |
49 | m->tcf_refcnt--; | 49 | m->tcf_refcnt--; |
50 | if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { | 50 | if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) { |
51 | list_del(&m->tcfm_list); | 51 | list_del(&m->tcfm_list); |
52 | if (m->tcfm_dev) | 52 | if (m->tcfm_dev) |
53 | dev_put(m->tcfm_dev); | 53 | dev_put(m->tcfm_dev); |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 178a4bd7b7cb..762b027650a9 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est, | |||
69 | pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, | 69 | pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, |
70 | &nat_idx_gen, &nat_hash_info); | 70 | &nat_idx_gen, &nat_hash_info); |
71 | if (IS_ERR(pc)) | 71 | if (IS_ERR(pc)) |
72 | return PTR_ERR(pc); | 72 | return PTR_ERR(pc); |
73 | p = to_tcf_nat(pc); | 73 | p = to_tcf_nat(pc); |
74 | ret = ACT_P_CREATED; | 74 | ret = ACT_P_CREATED; |
75 | } else { | 75 | } else { |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 445bef716f77..50c7c06c019d 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, | |||
70 | pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, | 70 | pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, |
71 | &pedit_idx_gen, &pedit_hash_info); | 71 | &pedit_idx_gen, &pedit_hash_info); |
72 | if (IS_ERR(pc)) | 72 | if (IS_ERR(pc)) |
73 | return PTR_ERR(pc); | 73 | return PTR_ERR(pc); |
74 | p = to_pedit(pc); | 74 | p = to_pedit(pc); |
75 | keys = kmalloc(ksize, GFP_KERNEL); | 75 | keys = kmalloc(ksize, GFP_KERNEL); |
76 | if (keys == NULL) { | 76 | if (keys == NULL) { |
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
127 | int i, munged = 0; | 127 | int i, munged = 0; |
128 | unsigned int off; | 128 | unsigned int off; |
129 | 129 | ||
130 | if (skb_cloned(skb)) { | 130 | if (skb_cloned(skb) && |
131 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 131 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
132 | return p->tcf_action; | 132 | return p->tcf_action; |
133 | } | ||
134 | } | ||
135 | 133 | ||
136 | off = skb_network_offset(skb); | 134 | off = skb_network_offset(skb); |
137 | 135 | ||
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e2f08b1e2e58..8a1630774fd6 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,8 @@ | |||
22 | #include <net/act_api.h> | 22 | #include <net/act_api.h> |
23 | #include <net/netlink.h> | 23 | #include <net/netlink.h> |
24 | 24 | ||
25 | #define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L) | 25 | #define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L) |
26 | #define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L) | 26 | #define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L) |
27 | 27 | ||
28 | #define POL_TAB_MASK 15 | 28 | #define POL_TAB_MASK 15 |
29 | static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; | 29 | static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; |
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = { | |||
37 | }; | 37 | }; |
38 | 38 | ||
39 | /* old policer structure from before tc actions */ | 39 | /* old policer structure from before tc actions */ |
40 | struct tc_police_compat | 40 | struct tc_police_compat { |
41 | { | ||
42 | u32 index; | 41 | u32 index; |
43 | int action; | 42 | int action; |
44 | u32 limit; | 43 | u32 limit; |
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { | |||
139 | static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, | 138 | static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, |
140 | struct tc_action *a, int ovr, int bind) | 139 | struct tc_action *a, int ovr, int bind) |
141 | { | 140 | { |
142 | unsigned h; | 141 | unsigned int h; |
143 | int ret = 0, err; | 142 | int ret = 0, err; |
144 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 143 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
145 | struct tc_police *parm; | 144 | struct tc_police *parm; |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7287cff7af3e..a34a22de60b3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
47 | /* print policy string followed by _ then packet count | 47 | /* print policy string followed by _ then packet count |
48 | * Example if this was the 3rd packet and the string was "hello" | 48 | * Example if this was the 3rd packet and the string was "hello" |
49 | * then it would look like "hello_3" (without quotes) | 49 | * then it would look like "hello_3" (without quotes) |
50 | **/ | 50 | */ |
51 | pr_info("simple: %s_%d\n", | 51 | pr_info("simple: %s_%d\n", |
52 | (char *)d->tcfd_defdata, d->tcf_bstats.packets); | 52 | (char *)d->tcfd_defdata, d->tcf_bstats.packets); |
53 | spin_unlock(&d->tcf_lock); | 53 | spin_unlock(&d->tcf_lock); |
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, | |||
125 | pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, | 125 | pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, |
126 | &simp_idx_gen, &simp_hash_info); | 126 | &simp_idx_gen, &simp_hash_info); |
127 | if (IS_ERR(pc)) | 127 | if (IS_ERR(pc)) |
128 | return PTR_ERR(pc); | 128 | return PTR_ERR(pc); |
129 | 129 | ||
130 | d = to_defact(pc); | 130 | d = to_defact(pc); |
131 | ret = alloc_defdata(d, defdata); | 131 | ret = alloc_defdata(d, defdata); |
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, | |||
149 | return ret; | 149 | return ret; |
150 | } | 150 | } |
151 | 151 | ||
152 | static inline int tcf_simp_cleanup(struct tc_action *a, int bind) | 152 | static int tcf_simp_cleanup(struct tc_action *a, int bind) |
153 | { | 153 | { |
154 | struct tcf_defact *d = a->priv; | 154 | struct tcf_defact *d = a->priv; |
155 | 155 | ||
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind) | |||
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, | 161 | static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, |
162 | int bind, int ref) | 162 | int bind, int ref) |
163 | { | 163 | { |
164 | unsigned char *b = skb_tail_pointer(skb); | 164 | unsigned char *b = skb_tail_pointer(skb); |
165 | struct tcf_defact *d = a->priv; | 165 | struct tcf_defact *d = a->priv; |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 836f5fee9e58..5f6f0c7c3905 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
113 | pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, | 113 | pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, |
114 | &skbedit_idx_gen, &skbedit_hash_info); | 114 | &skbedit_idx_gen, &skbedit_hash_info); |
115 | if (IS_ERR(pc)) | 115 | if (IS_ERR(pc)) |
116 | return PTR_ERR(pc); | 116 | return PTR_ERR(pc); |
117 | 117 | ||
118 | d = to_skbedit(pc); | 118 | d = to_skbedit(pc); |
119 | ret = ACT_P_CREATED; | 119 | ret = ACT_P_CREATED; |
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
144 | return ret; | 144 | return ret; |
145 | } | 145 | } |
146 | 146 | ||
147 | static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) | 147 | static int tcf_skbedit_cleanup(struct tc_action *a, int bind) |
148 | { | 148 | { |
149 | struct tcf_skbedit *d = a->priv; | 149 | struct tcf_skbedit *d = a->priv; |
150 | 150 | ||
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) | |||
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
156 | static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | 156 | static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, |
157 | int bind, int ref) | 157 | int bind, int ref) |
158 | { | 158 | { |
159 | unsigned char *b = skb_tail_pointer(skb); | 159 | unsigned char *b = skb_tail_pointer(skb); |
160 | struct tcf_skbedit *d = a->priv; | 160 | struct tcf_skbedit *d = a->priv; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5fd0c28ef79a..bb2c523f8158 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) | |||
85 | int rc = -ENOENT; | 85 | int rc = -ENOENT; |
86 | 86 | ||
87 | write_lock(&cls_mod_lock); | 87 | write_lock(&cls_mod_lock); |
88 | for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) | 88 | for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next) |
89 | if (t == ops) | 89 | if (t == ops) |
90 | break; | 90 | break; |
91 | 91 | ||
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) | |||
111 | u32 first = TC_H_MAKE(0xC0000000U, 0U); | 111 | u32 first = TC_H_MAKE(0xC0000000U, 0U); |
112 | 112 | ||
113 | if (tp) | 113 | if (tp) |
114 | first = tp->prio-1; | 114 | first = tp->prio - 1; |
115 | 115 | ||
116 | return first; | 116 | return first; |
117 | } | 117 | } |
@@ -149,7 +149,8 @@ replay: | |||
149 | 149 | ||
150 | if (prio == 0) { | 150 | if (prio == 0) { |
151 | /* If no priority is given, user wants we allocated it. */ | 151 | /* If no priority is given, user wants we allocated it. */ |
152 | if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) | 152 | if (n->nlmsg_type != RTM_NEWTFILTER || |
153 | !(n->nlmsg_flags & NLM_F_CREATE)) | ||
153 | return -ENOENT; | 154 | return -ENOENT; |
154 | prio = TC_H_MAKE(0x80000000U, 0U); | 155 | prio = TC_H_MAKE(0x80000000U, 0U); |
155 | } | 156 | } |
@@ -176,7 +177,8 @@ replay: | |||
176 | } | 177 | } |
177 | 178 | ||
178 | /* Is it classful? */ | 179 | /* Is it classful? */ |
179 | if ((cops = q->ops->cl_ops) == NULL) | 180 | cops = q->ops->cl_ops; |
181 | if (!cops) | ||
180 | return -EINVAL; | 182 | return -EINVAL; |
181 | 183 | ||
182 | if (cops->tcf_chain == NULL) | 184 | if (cops->tcf_chain == NULL) |
@@ -196,10 +198,11 @@ replay: | |||
196 | goto errout; | 198 | goto errout; |
197 | 199 | ||
198 | /* Check the chain for existence of proto-tcf with this priority */ | 200 | /* Check the chain for existence of proto-tcf with this priority */ |
199 | for (back = chain; (tp=*back) != NULL; back = &tp->next) { | 201 | for (back = chain; (tp = *back) != NULL; back = &tp->next) { |
200 | if (tp->prio >= prio) { | 202 | if (tp->prio >= prio) { |
201 | if (tp->prio == prio) { | 203 | if (tp->prio == prio) { |
202 | if (!nprio || (tp->protocol != protocol && protocol)) | 204 | if (!nprio || |
205 | (tp->protocol != protocol && protocol)) | ||
203 | goto errout; | 206 | goto errout; |
204 | } else | 207 | } else |
205 | tp = NULL; | 208 | tp = NULL; |
@@ -216,7 +219,8 @@ replay: | |||
216 | goto errout; | 219 | goto errout; |
217 | 220 | ||
218 | err = -ENOENT; | 221 | err = -ENOENT; |
219 | if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) | 222 | if (n->nlmsg_type != RTM_NEWTFILTER || |
223 | !(n->nlmsg_flags & NLM_F_CREATE)) | ||
220 | goto errout; | 224 | goto errout; |
221 | 225 | ||
222 | 226 | ||
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
420 | 424 | ||
421 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 425 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
422 | return skb->len; | 426 | return skb->len; |
423 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | 427 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
428 | if (!dev) | ||
424 | return skb->len; | 429 | return skb->len; |
425 | 430 | ||
426 | if (!tcm->tcm_parent) | 431 | if (!tcm->tcm_parent) |
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
429 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); | 434 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
430 | if (!q) | 435 | if (!q) |
431 | goto out; | 436 | goto out; |
432 | if ((cops = q->ops->cl_ops) == NULL) | 437 | cops = q->ops->cl_ops; |
438 | if (!cops) | ||
433 | goto errout; | 439 | goto errout; |
434 | if (cops->tcf_chain == NULL) | 440 | if (cops->tcf_chain == NULL) |
435 | goto errout; | 441 | goto errout; |
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
444 | 450 | ||
445 | s_t = cb->args[0]; | 451 | s_t = cb->args[0]; |
446 | 452 | ||
447 | for (tp=*chain, t=0; tp; tp = tp->next, t++) { | 453 | for (tp = *chain, t = 0; tp; tp = tp->next, t++) { |
448 | if (t < s_t) continue; | 454 | if (t < s_t) |
455 | continue; | ||
449 | if (TC_H_MAJ(tcm->tcm_info) && | 456 | if (TC_H_MAJ(tcm->tcm_info) && |
450 | TC_H_MAJ(tcm->tcm_info) != tp->prio) | 457 | TC_H_MAJ(tcm->tcm_info) != tp->prio) |
451 | continue; | 458 | continue; |
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
468 | arg.skb = skb; | 475 | arg.skb = skb; |
469 | arg.cb = cb; | 476 | arg.cb = cb; |
470 | arg.w.stop = 0; | 477 | arg.w.stop = 0; |
471 | arg.w.skip = cb->args[1]-1; | 478 | arg.w.skip = cb->args[1] - 1; |
472 | arg.w.count = 0; | 479 | arg.w.count = 0; |
473 | tp->ops->walk(tp, &arg.w); | 480 | tp->ops->walk(tp, &arg.w); |
474 | cb->args[1] = arg.w.count+1; | 481 | cb->args[1] = arg.w.count + 1; |
475 | if (arg.w.stop) | 482 | if (arg.w.stop) |
476 | break; | 483 | break; |
477 | } | 484 | } |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f23d9155b1ef..8be8872dd571 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -21,14 +21,12 @@ | |||
21 | #include <net/act_api.h> | 21 | #include <net/act_api.h> |
22 | #include <net/pkt_cls.h> | 22 | #include <net/pkt_cls.h> |
23 | 23 | ||
24 | struct basic_head | 24 | struct basic_head { |
25 | { | ||
26 | u32 hgenerator; | 25 | u32 hgenerator; |
27 | struct list_head flist; | 26 | struct list_head flist; |
28 | }; | 27 | }; |
29 | 28 | ||
30 | struct basic_filter | 29 | struct basic_filter { |
31 | { | ||
32 | u32 handle; | 30 | u32 handle; |
33 | struct tcf_exts exts; | 31 | struct tcf_exts exts; |
34 | struct tcf_ematch_tree ematches; | 32 | struct tcf_ematch_tree ematches; |
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp) | |||
92 | return 0; | 90 | return 0; |
93 | } | 91 | } |
94 | 92 | ||
95 | static inline void basic_delete_filter(struct tcf_proto *tp, | 93 | static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f) |
96 | struct basic_filter *f) | ||
97 | { | 94 | { |
98 | tcf_unbind_filter(tp, &f->res); | 95 | tcf_unbind_filter(tp, &f->res); |
99 | tcf_exts_destroy(tp, &f->exts); | 96 | tcf_exts_destroy(tp, &f->exts); |
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = { | |||
135 | [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, | 132 | [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, |
136 | }; | 133 | }; |
137 | 134 | ||
138 | static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, | 135 | static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, |
139 | unsigned long base, struct nlattr **tb, | 136 | unsigned long base, struct nlattr **tb, |
140 | struct nlattr *est) | 137 | struct nlattr *est) |
141 | { | 138 | { |
142 | int err = -EINVAL; | 139 | int err = -EINVAL; |
143 | struct tcf_exts e; | 140 | struct tcf_exts e; |
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
203 | } while (--i > 0 && basic_get(tp, head->hgenerator)); | 200 | } while (--i > 0 && basic_get(tp, head->hgenerator)); |
204 | 201 | ||
205 | if (i <= 0) { | 202 | if (i <= 0) { |
206 | printk(KERN_ERR "Insufficient number of handles\n"); | 203 | pr_err("Insufficient number of handles\n"); |
207 | goto errout; | 204 | goto errout; |
208 | } | 205 | } |
209 | 206 | ||
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d49c40fb7e09..32a335194ca5 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | |||
56 | { | 56 | { |
57 | struct cgroup_cls_state *cs; | 57 | struct cgroup_cls_state *cs; |
58 | 58 | ||
59 | if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL))) | 59 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
60 | if (!cs) | ||
60 | return ERR_PTR(-ENOMEM); | 61 | return ERR_PTR(-ENOMEM); |
61 | 62 | ||
62 | if (cgrp->parent) | 63 | if (cgrp->parent) |
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
94 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); | 95 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); |
95 | } | 96 | } |
96 | 97 | ||
97 | struct cls_cgroup_head | 98 | struct cls_cgroup_head { |
98 | { | ||
99 | u32 handle; | 99 | u32 handle; |
100 | struct tcf_exts exts; | 100 | struct tcf_exts exts; |
101 | struct tcf_ematch_tree ematches; | 101 | struct tcf_ematch_tree ematches; |
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, | |||
166 | u32 handle, struct nlattr **tca, | 166 | u32 handle, struct nlattr **tca, |
167 | unsigned long *arg) | 167 | unsigned long *arg) |
168 | { | 168 | { |
169 | struct nlattr *tb[TCA_CGROUP_MAX+1]; | 169 | struct nlattr *tb[TCA_CGROUP_MAX + 1]; |
170 | struct cls_cgroup_head *head = tp->root; | 170 | struct cls_cgroup_head *head = tp->root; |
171 | struct tcf_ematch_tree t; | 171 | struct tcf_ematch_tree t; |
172 | struct tcf_exts e; | 172 | struct tcf_exts e; |
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5b271a18bc3a..5eec16e516b9 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb) | |||
121 | if (!pskb_network_may_pull(skb, sizeof(*iph))) | 121 | if (!pskb_network_may_pull(skb, sizeof(*iph))) |
122 | break; | 122 | break; |
123 | iph = ip_hdr(skb); | 123 | iph = ip_hdr(skb); |
124 | if (iph->frag_off & htons(IP_MF|IP_OFFSET)) | 124 | if (iph->frag_off & htons(IP_MF | IP_OFFSET)) |
125 | break; | 125 | break; |
126 | poff = proto_ports_offset(iph->protocol); | 126 | poff = proto_ports_offset(iph->protocol); |
127 | if (poff >= 0 && | 127 | if (poff >= 0 && |
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb) | |||
163 | if (!pskb_network_may_pull(skb, sizeof(*iph))) | 163 | if (!pskb_network_may_pull(skb, sizeof(*iph))) |
164 | break; | 164 | break; |
165 | iph = ip_hdr(skb); | 165 | iph = ip_hdr(skb); |
166 | if (iph->frag_off & htons(IP_MF|IP_OFFSET)) | 166 | if (iph->frag_off & htons(IP_MF | IP_OFFSET)) |
167 | break; | 167 | break; |
168 | poff = proto_ports_offset(iph->protocol); | 168 | poff = proto_ports_offset(iph->protocol); |
169 | if (poff >= 0 && | 169 | if (poff >= 0 && |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 93b0a7b6f9b4..26e7bc4ffb79 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -31,14 +31,12 @@ | |||
31 | 31 | ||
32 | #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *)) | 32 | #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *)) |
33 | 33 | ||
34 | struct fw_head | 34 | struct fw_head { |
35 | { | ||
36 | struct fw_filter *ht[HTSIZE]; | 35 | struct fw_filter *ht[HTSIZE]; |
37 | u32 mask; | 36 | u32 mask; |
38 | }; | 37 | }; |
39 | 38 | ||
40 | struct fw_filter | 39 | struct fw_filter { |
41 | { | ||
42 | struct fw_filter *next; | 40 | struct fw_filter *next; |
43 | u32 id; | 41 | u32 id; |
44 | struct tcf_result res; | 42 | struct tcf_result res; |
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = { | |||
53 | .police = TCA_FW_POLICE | 51 | .police = TCA_FW_POLICE |
54 | }; | 52 | }; |
55 | 53 | ||
56 | static __inline__ int fw_hash(u32 handle) | 54 | static inline int fw_hash(u32 handle) |
57 | { | 55 | { |
58 | if (HTSIZE == 4096) | 56 | if (HTSIZE == 4096) |
59 | return ((handle >> 24) & 0xFFF) ^ | 57 | return ((handle >> 24) & 0xFFF) ^ |
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle) | |||
82 | static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, | 80 | static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, |
83 | struct tcf_result *res) | 81 | struct tcf_result *res) |
84 | { | 82 | { |
85 | struct fw_head *head = (struct fw_head*)tp->root; | 83 | struct fw_head *head = (struct fw_head *)tp->root; |
86 | struct fw_filter *f; | 84 | struct fw_filter *f; |
87 | int r; | 85 | int r; |
88 | u32 id = skb->mark; | 86 | u32 id = skb->mark; |
89 | 87 | ||
90 | if (head != NULL) { | 88 | if (head != NULL) { |
91 | id &= head->mask; | 89 | id &= head->mask; |
92 | for (f=head->ht[fw_hash(id)]; f; f=f->next) { | 90 | for (f = head->ht[fw_hash(id)]; f; f = f->next) { |
93 | if (f->id == id) { | 91 | if (f->id == id) { |
94 | *res = f->res; | 92 | *res = f->res; |
95 | #ifdef CONFIG_NET_CLS_IND | 93 | #ifdef CONFIG_NET_CLS_IND |
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
105 | } | 103 | } |
106 | } else { | 104 | } else { |
107 | /* old method */ | 105 | /* old method */ |
108 | if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) { | 106 | if (id && (TC_H_MAJ(id) == 0 || |
107 | !(TC_H_MAJ(id ^ tp->q->handle)))) { | ||
109 | res->classid = id; | 108 | res->classid = id; |
110 | res->class = 0; | 109 | res->class = 0; |
111 | return 0; | 110 | return 0; |
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
117 | 116 | ||
118 | static unsigned long fw_get(struct tcf_proto *tp, u32 handle) | 117 | static unsigned long fw_get(struct tcf_proto *tp, u32 handle) |
119 | { | 118 | { |
120 | struct fw_head *head = (struct fw_head*)tp->root; | 119 | struct fw_head *head = (struct fw_head *)tp->root; |
121 | struct fw_filter *f; | 120 | struct fw_filter *f; |
122 | 121 | ||
123 | if (head == NULL) | 122 | if (head == NULL) |
124 | return 0; | 123 | return 0; |
125 | 124 | ||
126 | for (f=head->ht[fw_hash(handle)]; f; f=f->next) { | 125 | for (f = head->ht[fw_hash(handle)]; f; f = f->next) { |
127 | if (f->id == handle) | 126 | if (f->id == handle) |
128 | return (unsigned long)f; | 127 | return (unsigned long)f; |
129 | } | 128 | } |
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp) | |||
139 | return 0; | 138 | return 0; |
140 | } | 139 | } |
141 | 140 | ||
142 | static inline void | 141 | static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) |
143 | fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) | ||
144 | { | 142 | { |
145 | tcf_unbind_filter(tp, &f->res); | 143 | tcf_unbind_filter(tp, &f->res); |
146 | tcf_exts_destroy(tp, &f->exts); | 144 | tcf_exts_destroy(tp, &f->exts); |
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp) | |||
156 | if (head == NULL) | 154 | if (head == NULL) |
157 | return; | 155 | return; |
158 | 156 | ||
159 | for (h=0; h<HTSIZE; h++) { | 157 | for (h = 0; h < HTSIZE; h++) { |
160 | while ((f=head->ht[h]) != NULL) { | 158 | while ((f = head->ht[h]) != NULL) { |
161 | head->ht[h] = f->next; | 159 | head->ht[h] = f->next; |
162 | fw_delete_filter(tp, f); | 160 | fw_delete_filter(tp, f); |
163 | } | 161 | } |
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp) | |||
167 | 165 | ||
168 | static int fw_delete(struct tcf_proto *tp, unsigned long arg) | 166 | static int fw_delete(struct tcf_proto *tp, unsigned long arg) |
169 | { | 167 | { |
170 | struct fw_head *head = (struct fw_head*)tp->root; | 168 | struct fw_head *head = (struct fw_head *)tp->root; |
171 | struct fw_filter *f = (struct fw_filter*)arg; | 169 | struct fw_filter *f = (struct fw_filter *)arg; |
172 | struct fw_filter **fp; | 170 | struct fw_filter **fp; |
173 | 171 | ||
174 | if (head == NULL || f == NULL) | 172 | if (head == NULL || f == NULL) |
175 | goto out; | 173 | goto out; |
176 | 174 | ||
177 | for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { | 175 | for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { |
178 | if (*fp == f) { | 176 | if (*fp == f) { |
179 | tcf_tree_lock(tp); | 177 | tcf_tree_lock(tp); |
180 | *fp = f->next; | 178 | *fp = f->next; |
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, | |||
240 | struct nlattr **tca, | 238 | struct nlattr **tca, |
241 | unsigned long *arg) | 239 | unsigned long *arg) |
242 | { | 240 | { |
243 | struct fw_head *head = (struct fw_head*)tp->root; | 241 | struct fw_head *head = (struct fw_head *)tp->root; |
244 | struct fw_filter *f = (struct fw_filter *) *arg; | 242 | struct fw_filter *f = (struct fw_filter *) *arg; |
245 | struct nlattr *opt = tca[TCA_OPTIONS]; | 243 | struct nlattr *opt = tca[TCA_OPTIONS]; |
246 | struct nlattr *tb[TCA_FW_MAX + 1]; | 244 | struct nlattr *tb[TCA_FW_MAX + 1]; |
@@ -302,7 +300,7 @@ errout: | |||
302 | 300 | ||
303 | static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) | 301 | static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) |
304 | { | 302 | { |
305 | struct fw_head *head = (struct fw_head*)tp->root; | 303 | struct fw_head *head = (struct fw_head *)tp->root; |
306 | int h; | 304 | int h; |
307 | 305 | ||
308 | if (head == NULL) | 306 | if (head == NULL) |
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, | |||
332 | struct sk_buff *skb, struct tcmsg *t) | 330 | struct sk_buff *skb, struct tcmsg *t) |
333 | { | 331 | { |
334 | struct fw_head *head = (struct fw_head *)tp->root; | 332 | struct fw_head *head = (struct fw_head *)tp->root; |
335 | struct fw_filter *f = (struct fw_filter*)fh; | 333 | struct fw_filter *f = (struct fw_filter *)fh; |
336 | unsigned char *b = skb_tail_pointer(skb); | 334 | unsigned char *b = skb_tail_pointer(skb); |
337 | struct nlattr *nest; | 335 | struct nlattr *nest; |
338 | 336 | ||
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 694dcd85dec8..d580cdfca093 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -23,34 +23,30 @@ | |||
23 | #include <net/pkt_cls.h> | 23 | #include <net/pkt_cls.h> |
24 | 24 | ||
25 | /* | 25 | /* |
26 | 1. For now we assume that route tags < 256. | 26 | * 1. For now we assume that route tags < 256. |
27 | It allows to use direct table lookups, instead of hash tables. | 27 | * It allows to use direct table lookups, instead of hash tables. |
28 | 2. For now we assume that "from TAG" and "fromdev DEV" statements | 28 | * 2. For now we assume that "from TAG" and "fromdev DEV" statements |
29 | are mutually exclusive. | 29 | * are mutually exclusive. |
30 | 3. "to TAG from ANY" has higher priority, than "to ANY from XXX" | 30 | * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX" |
31 | */ | 31 | */ |
32 | 32 | ||
33 | struct route4_fastmap | 33 | struct route4_fastmap { |
34 | { | ||
35 | struct route4_filter *filter; | 34 | struct route4_filter *filter; |
36 | u32 id; | 35 | u32 id; |
37 | int iif; | 36 | int iif; |
38 | }; | 37 | }; |
39 | 38 | ||
40 | struct route4_head | 39 | struct route4_head { |
41 | { | ||
42 | struct route4_fastmap fastmap[16]; | 40 | struct route4_fastmap fastmap[16]; |
43 | struct route4_bucket *table[256+1]; | 41 | struct route4_bucket *table[256 + 1]; |
44 | }; | 42 | }; |
45 | 43 | ||
46 | struct route4_bucket | 44 | struct route4_bucket { |
47 | { | ||
48 | /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ | 45 | /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ |
49 | struct route4_filter *ht[16+16+1]; | 46 | struct route4_filter *ht[16 + 16 + 1]; |
50 | }; | 47 | }; |
51 | 48 | ||
52 | struct route4_filter | 49 | struct route4_filter { |
53 | { | ||
54 | struct route4_filter *next; | 50 | struct route4_filter *next; |
55 | u32 id; | 51 | u32 id; |
56 | int iif; | 52 | int iif; |
@@ -61,20 +57,20 @@ struct route4_filter | |||
61 | struct route4_bucket *bkt; | 57 | struct route4_bucket *bkt; |
62 | }; | 58 | }; |
63 | 59 | ||
64 | #define ROUTE4_FAILURE ((struct route4_filter*)(-1L)) | 60 | #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) |
65 | 61 | ||
66 | static const struct tcf_ext_map route_ext_map = { | 62 | static const struct tcf_ext_map route_ext_map = { |
67 | .police = TCA_ROUTE4_POLICE, | 63 | .police = TCA_ROUTE4_POLICE, |
68 | .action = TCA_ROUTE4_ACT | 64 | .action = TCA_ROUTE4_ACT |
69 | }; | 65 | }; |
70 | 66 | ||
71 | static __inline__ int route4_fastmap_hash(u32 id, int iif) | 67 | static inline int route4_fastmap_hash(u32 id, int iif) |
72 | { | 68 | { |
73 | return id&0xF; | 69 | return id & 0xF; |
74 | } | 70 | } |
75 | 71 | ||
76 | static inline | 72 | static void |
77 | void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) | 73 | route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) |
78 | { | 74 | { |
79 | spinlock_t *root_lock = qdisc_root_sleeping_lock(q); | 75 | spinlock_t *root_lock = qdisc_root_sleeping_lock(q); |
80 | 76 | ||
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) | |||
83 | spin_unlock_bh(root_lock); | 79 | spin_unlock_bh(root_lock); |
84 | } | 80 | } |
85 | 81 | ||
86 | static inline void | 82 | static void |
87 | route4_set_fastmap(struct route4_head *head, u32 id, int iif, | 83 | route4_set_fastmap(struct route4_head *head, u32 id, int iif, |
88 | struct route4_filter *f) | 84 | struct route4_filter *f) |
89 | { | 85 | { |
90 | int h = route4_fastmap_hash(id, iif); | 86 | int h = route4_fastmap_hash(id, iif); |
87 | |||
91 | head->fastmap[h].id = id; | 88 | head->fastmap[h].id = id; |
92 | head->fastmap[h].iif = iif; | 89 | head->fastmap[h].iif = iif; |
93 | head->fastmap[h].filter = f; | 90 | head->fastmap[h].filter = f; |
94 | } | 91 | } |
95 | 92 | ||
96 | static __inline__ int route4_hash_to(u32 id) | 93 | static inline int route4_hash_to(u32 id) |
97 | { | 94 | { |
98 | return id&0xFF; | 95 | return id & 0xFF; |
99 | } | 96 | } |
100 | 97 | ||
101 | static __inline__ int route4_hash_from(u32 id) | 98 | static inline int route4_hash_from(u32 id) |
102 | { | 99 | { |
103 | return (id>>16)&0xF; | 100 | return (id >> 16) & 0xF; |
104 | } | 101 | } |
105 | 102 | ||
106 | static __inline__ int route4_hash_iif(int iif) | 103 | static inline int route4_hash_iif(int iif) |
107 | { | 104 | { |
108 | return 16 + ((iif>>16)&0xF); | 105 | return 16 + ((iif >> 16) & 0xF); |
109 | } | 106 | } |
110 | 107 | ||
111 | static __inline__ int route4_hash_wild(void) | 108 | static inline int route4_hash_wild(void) |
112 | { | 109 | { |
113 | return 32; | 110 | return 32; |
114 | } | 111 | } |
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void) | |||
131 | static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, | 128 | static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, |
132 | struct tcf_result *res) | 129 | struct tcf_result *res) |
133 | { | 130 | { |
134 | struct route4_head *head = (struct route4_head*)tp->root; | 131 | struct route4_head *head = (struct route4_head *)tp->root; |
135 | struct dst_entry *dst; | 132 | struct dst_entry *dst; |
136 | struct route4_bucket *b; | 133 | struct route4_bucket *b; |
137 | struct route4_filter *f; | 134 | struct route4_filter *f; |
138 | u32 id, h; | 135 | u32 id, h; |
139 | int iif, dont_cache = 0; | 136 | int iif, dont_cache = 0; |
140 | 137 | ||
141 | if ((dst = skb_dst(skb)) == NULL) | 138 | dst = skb_dst(skb); |
139 | if (!dst) | ||
142 | goto failure; | 140 | goto failure; |
143 | 141 | ||
144 | id = dst->tclassid; | 142 | id = dst->tclassid; |
145 | if (head == NULL) | 143 | if (head == NULL) |
146 | goto old_method; | 144 | goto old_method; |
147 | 145 | ||
148 | iif = ((struct rtable*)dst)->fl.iif; | 146 | iif = ((struct rtable *)dst)->fl.iif; |
149 | 147 | ||
150 | h = route4_fastmap_hash(id, iif); | 148 | h = route4_fastmap_hash(id, iif); |
151 | if (id == head->fastmap[h].id && | 149 | if (id == head->fastmap[h].id && |
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
161 | h = route4_hash_to(id); | 159 | h = route4_hash_to(id); |
162 | 160 | ||
163 | restart: | 161 | restart: |
164 | if ((b = head->table[h]) != NULL) { | 162 | b = head->table[h]; |
163 | if (b) { | ||
165 | for (f = b->ht[route4_hash_from(id)]; f; f = f->next) | 164 | for (f = b->ht[route4_hash_from(id)]; f; f = f->next) |
166 | if (f->id == id) | 165 | if (f->id == id) |
167 | ROUTE4_APPLY_RESULT(); | 166 | ROUTE4_APPLY_RESULT(); |
@@ -197,8 +196,9 @@ old_method: | |||
197 | 196 | ||
198 | static inline u32 to_hash(u32 id) | 197 | static inline u32 to_hash(u32 id) |
199 | { | 198 | { |
200 | u32 h = id&0xFF; | 199 | u32 h = id & 0xFF; |
201 | if (id&0x8000) | 200 | |
201 | if (id & 0x8000) | ||
202 | h += 256; | 202 | h += 256; |
203 | return h; | 203 | return h; |
204 | } | 204 | } |
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id) | |||
211 | if (!(id & 0x8000)) { | 211 | if (!(id & 0x8000)) { |
212 | if (id > 255) | 212 | if (id > 255) |
213 | return 256; | 213 | return 256; |
214 | return id&0xF; | 214 | return id & 0xF; |
215 | } | 215 | } |
216 | return 16 + (id&0xF); | 216 | return 16 + (id & 0xF); |
217 | } | 217 | } |
218 | 218 | ||
219 | static unsigned long route4_get(struct tcf_proto *tp, u32 handle) | 219 | static unsigned long route4_get(struct tcf_proto *tp, u32 handle) |
220 | { | 220 | { |
221 | struct route4_head *head = (struct route4_head*)tp->root; | 221 | struct route4_head *head = (struct route4_head *)tp->root; |
222 | struct route4_bucket *b; | 222 | struct route4_bucket *b; |
223 | struct route4_filter *f; | 223 | struct route4_filter *f; |
224 | unsigned h1, h2; | 224 | unsigned int h1, h2; |
225 | 225 | ||
226 | if (!head) | 226 | if (!head) |
227 | return 0; | 227 | return 0; |
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) | |||
230 | if (h1 > 256) | 230 | if (h1 > 256) |
231 | return 0; | 231 | return 0; |
232 | 232 | ||
233 | h2 = from_hash(handle>>16); | 233 | h2 = from_hash(handle >> 16); |
234 | if (h2 > 32) | 234 | if (h2 > 32) |
235 | return 0; | 235 | return 0; |
236 | 236 | ||
237 | if ((b = head->table[h1]) != NULL) { | 237 | b = head->table[h1]; |
238 | if (b) { | ||
238 | for (f = b->ht[h2]; f; f = f->next) | 239 | for (f = b->ht[h2]; f; f = f->next) |
239 | if (f->handle == handle) | 240 | if (f->handle == handle) |
240 | return (unsigned long)f; | 241 | return (unsigned long)f; |
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp) | |||
251 | return 0; | 252 | return 0; |
252 | } | 253 | } |
253 | 254 | ||
254 | static inline void | 255 | static void |
255 | route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) | 256 | route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) |
256 | { | 257 | { |
257 | tcf_unbind_filter(tp, &f->res); | 258 | tcf_unbind_filter(tp, &f->res); |
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp) | |||
267 | if (head == NULL) | 268 | if (head == NULL) |
268 | return; | 269 | return; |
269 | 270 | ||
270 | for (h1=0; h1<=256; h1++) { | 271 | for (h1 = 0; h1 <= 256; h1++) { |
271 | struct route4_bucket *b; | 272 | struct route4_bucket *b; |
272 | 273 | ||
273 | if ((b = head->table[h1]) != NULL) { | 274 | b = head->table[h1]; |
274 | for (h2=0; h2<=32; h2++) { | 275 | if (b) { |
276 | for (h2 = 0; h2 <= 32; h2++) { | ||
275 | struct route4_filter *f; | 277 | struct route4_filter *f; |
276 | 278 | ||
277 | while ((f = b->ht[h2]) != NULL) { | 279 | while ((f = b->ht[h2]) != NULL) { |
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp) | |||
287 | 289 | ||
288 | static int route4_delete(struct tcf_proto *tp, unsigned long arg) | 290 | static int route4_delete(struct tcf_proto *tp, unsigned long arg) |
289 | { | 291 | { |
290 | struct route4_head *head = (struct route4_head*)tp->root; | 292 | struct route4_head *head = (struct route4_head *)tp->root; |
291 | struct route4_filter **fp, *f = (struct route4_filter*)arg; | 293 | struct route4_filter **fp, *f = (struct route4_filter *)arg; |
292 | unsigned h = 0; | 294 | unsigned int h = 0; |
293 | struct route4_bucket *b; | 295 | struct route4_bucket *b; |
294 | int i; | 296 | int i; |
295 | 297 | ||
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) | |||
299 | h = f->handle; | 301 | h = f->handle; |
300 | b = f->bkt; | 302 | b = f->bkt; |
301 | 303 | ||
302 | for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) { | 304 | for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) { |
303 | if (*fp == f) { | 305 | if (*fp == f) { |
304 | tcf_tree_lock(tp); | 306 | tcf_tree_lock(tp); |
305 | *fp = f->next; | 307 | *fp = f->next; |
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) | |||
310 | 312 | ||
311 | /* Strip tree */ | 313 | /* Strip tree */ |
312 | 314 | ||
313 | for (i=0; i<=32; i++) | 315 | for (i = 0; i <= 32; i++) |
314 | if (b->ht[i]) | 316 | if (b->ht[i]) |
315 | return 0; | 317 | return 0; |
316 | 318 | ||
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, | |||
380 | } | 382 | } |
381 | 383 | ||
382 | h1 = to_hash(nhandle); | 384 | h1 = to_hash(nhandle); |
383 | if ((b = head->table[h1]) == NULL) { | 385 | b = head->table[h1]; |
386 | if (!b) { | ||
384 | err = -ENOBUFS; | 387 | err = -ENOBUFS; |
385 | b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); | 388 | b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); |
386 | if (b == NULL) | 389 | if (b == NULL) |
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, | |||
391 | tcf_tree_unlock(tp); | 394 | tcf_tree_unlock(tp); |
392 | } else { | 395 | } else { |
393 | unsigned int h2 = from_hash(nhandle >> 16); | 396 | unsigned int h2 = from_hash(nhandle >> 16); |
397 | |||
394 | err = -EEXIST; | 398 | err = -EEXIST; |
395 | for (fp = b->ht[h2]; fp; fp = fp->next) | 399 | for (fp = b->ht[h2]; fp; fp = fp->next) |
396 | if (fp->handle == f->handle) | 400 | if (fp->handle == f->handle) |
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, | |||
444 | if (err < 0) | 448 | if (err < 0) |
445 | return err; | 449 | return err; |
446 | 450 | ||
447 | if ((f = (struct route4_filter*)*arg) != NULL) { | 451 | f = (struct route4_filter *)*arg; |
452 | if (f) { | ||
448 | if (f->handle != handle && handle) | 453 | if (f->handle != handle && handle) |
449 | return -EINVAL; | 454 | return -EINVAL; |
450 | 455 | ||
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, | |||
481 | 486 | ||
482 | reinsert: | 487 | reinsert: |
483 | h = from_hash(f->handle >> 16); | 488 | h = from_hash(f->handle >> 16); |
484 | for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next) | 489 | for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next) |
485 | if (f->handle < f1->handle) | 490 | if (f->handle < f1->handle) |
486 | break; | 491 | break; |
487 | 492 | ||
@@ -492,7 +497,8 @@ reinsert: | |||
492 | if (old_handle && f->handle != old_handle) { | 497 | if (old_handle && f->handle != old_handle) { |
493 | th = to_hash(old_handle); | 498 | th = to_hash(old_handle); |
494 | h = from_hash(old_handle >> 16); | 499 | h = from_hash(old_handle >> 16); |
495 | if ((b = head->table[th]) != NULL) { | 500 | b = head->table[th]; |
501 | if (b) { | ||
496 | for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { | 502 | for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { |
497 | if (*fp == f) { | 503 | if (*fp == f) { |
498 | *fp = f->next; | 504 | *fp = f->next; |
@@ -515,7 +521,7 @@ errout: | |||
515 | static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) | 521 | static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) |
516 | { | 522 | { |
517 | struct route4_head *head = tp->root; | 523 | struct route4_head *head = tp->root; |
518 | unsigned h, h1; | 524 | unsigned int h, h1; |
519 | 525 | ||
520 | if (head == NULL) | 526 | if (head == NULL) |
521 | arg->stop = 1; | 527 | arg->stop = 1; |
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) | |||
549 | static int route4_dump(struct tcf_proto *tp, unsigned long fh, | 555 | static int route4_dump(struct tcf_proto *tp, unsigned long fh, |
550 | struct sk_buff *skb, struct tcmsg *t) | 556 | struct sk_buff *skb, struct tcmsg *t) |
551 | { | 557 | { |
552 | struct route4_filter *f = (struct route4_filter*)fh; | 558 | struct route4_filter *f = (struct route4_filter *)fh; |
553 | unsigned char *b = skb_tail_pointer(skb); | 559 | unsigned char *b = skb_tail_pointer(skb); |
554 | struct nlattr *nest; | 560 | struct nlattr *nest; |
555 | u32 id; | 561 | u32 id; |
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, | |||
563 | if (nest == NULL) | 569 | if (nest == NULL) |
564 | goto nla_put_failure; | 570 | goto nla_put_failure; |
565 | 571 | ||
566 | if (!(f->handle&0x8000)) { | 572 | if (!(f->handle & 0x8000)) { |
567 | id = f->id&0xFF; | 573 | id = f->id & 0xFF; |
568 | NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); | 574 | NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); |
569 | } | 575 | } |
570 | if (f->handle&0x80000000) { | 576 | if (f->handle & 0x80000000) { |
571 | if ((f->handle>>16) != 0xFFFF) | 577 | if ((f->handle >> 16) != 0xFFFF) |
572 | NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); | 578 | NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); |
573 | } else { | 579 | } else { |
574 | id = f->id>>16; | 580 | id = f->id >> 16; |
575 | NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); | 581 | NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); |
576 | } | 582 | } |
577 | if (f->res.classid) | 583 | if (f->res.classid) |
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 425a1790b048..402c44b241a3 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h | |||
@@ -66,28 +66,25 @@ | |||
66 | powerful classification engine. */ | 66 | powerful classification engine. */ |
67 | 67 | ||
68 | 68 | ||
69 | struct rsvp_head | 69 | struct rsvp_head { |
70 | { | ||
71 | u32 tmap[256/32]; | 70 | u32 tmap[256/32]; |
72 | u32 hgenerator; | 71 | u32 hgenerator; |
73 | u8 tgenerator; | 72 | u8 tgenerator; |
74 | struct rsvp_session *ht[256]; | 73 | struct rsvp_session *ht[256]; |
75 | }; | 74 | }; |
76 | 75 | ||
77 | struct rsvp_session | 76 | struct rsvp_session { |
78 | { | ||
79 | struct rsvp_session *next; | 77 | struct rsvp_session *next; |
80 | __be32 dst[RSVP_DST_LEN]; | 78 | __be32 dst[RSVP_DST_LEN]; |
81 | struct tc_rsvp_gpi dpi; | 79 | struct tc_rsvp_gpi dpi; |
82 | u8 protocol; | 80 | u8 protocol; |
83 | u8 tunnelid; | 81 | u8 tunnelid; |
84 | /* 16 (src,sport) hash slots, and one wildcard source slot */ | 82 | /* 16 (src,sport) hash slots, and one wildcard source slot */ |
85 | struct rsvp_filter *ht[16+1]; | 83 | struct rsvp_filter *ht[16 + 1]; |
86 | }; | 84 | }; |
87 | 85 | ||
88 | 86 | ||
89 | struct rsvp_filter | 87 | struct rsvp_filter { |
90 | { | ||
91 | struct rsvp_filter *next; | 88 | struct rsvp_filter *next; |
92 | __be32 src[RSVP_DST_LEN]; | 89 | __be32 src[RSVP_DST_LEN]; |
93 | struct tc_rsvp_gpi spi; | 90 | struct tc_rsvp_gpi spi; |
@@ -100,17 +97,19 @@ struct rsvp_filter | |||
100 | struct rsvp_session *sess; | 97 | struct rsvp_session *sess; |
101 | }; | 98 | }; |
102 | 99 | ||
103 | static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) | 100 | static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) |
104 | { | 101 | { |
105 | unsigned h = (__force __u32)dst[RSVP_DST_LEN-1]; | 102 | unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1]; |
103 | |||
106 | h ^= h>>16; | 104 | h ^= h>>16; |
107 | h ^= h>>8; | 105 | h ^= h>>8; |
108 | return (h ^ protocol ^ tunnelid) & 0xFF; | 106 | return (h ^ protocol ^ tunnelid) & 0xFF; |
109 | } | 107 | } |
110 | 108 | ||
111 | static __inline__ unsigned hash_src(__be32 *src) | 109 | static inline unsigned int hash_src(__be32 *src) |
112 | { | 110 | { |
113 | unsigned h = (__force __u32)src[RSVP_DST_LEN-1]; | 111 | unsigned int h = (__force __u32)src[RSVP_DST_LEN-1]; |
112 | |||
114 | h ^= h>>16; | 113 | h ^= h>>16; |
115 | h ^= h>>8; | 114 | h ^= h>>8; |
116 | h ^= h>>4; | 115 | h ^= h>>4; |
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = { | |||
134 | static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, | 133 | static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, |
135 | struct tcf_result *res) | 134 | struct tcf_result *res) |
136 | { | 135 | { |
137 | struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; | 136 | struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; |
138 | struct rsvp_session *s; | 137 | struct rsvp_session *s; |
139 | struct rsvp_filter *f; | 138 | struct rsvp_filter *f; |
140 | unsigned h1, h2; | 139 | unsigned int h1, h2; |
141 | __be32 *dst, *src; | 140 | __be32 *dst, *src; |
142 | u8 protocol; | 141 | u8 protocol; |
143 | u8 tunnelid = 0; | 142 | u8 tunnelid = 0; |
@@ -162,13 +161,13 @@ restart: | |||
162 | src = &nhptr->saddr.s6_addr32[0]; | 161 | src = &nhptr->saddr.s6_addr32[0]; |
163 | dst = &nhptr->daddr.s6_addr32[0]; | 162 | dst = &nhptr->daddr.s6_addr32[0]; |
164 | protocol = nhptr->nexthdr; | 163 | protocol = nhptr->nexthdr; |
165 | xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr); | 164 | xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr); |
166 | #else | 165 | #else |
167 | src = &nhptr->saddr; | 166 | src = &nhptr->saddr; |
168 | dst = &nhptr->daddr; | 167 | dst = &nhptr->daddr; |
169 | protocol = nhptr->protocol; | 168 | protocol = nhptr->protocol; |
170 | xprt = ((u8*)nhptr) + (nhptr->ihl<<2); | 169 | xprt = ((u8 *)nhptr) + (nhptr->ihl<<2); |
171 | if (nhptr->frag_off & htons(IP_MF|IP_OFFSET)) | 170 | if (nhptr->frag_off & htons(IP_MF | IP_OFFSET)) |
172 | return -1; | 171 | return -1; |
173 | #endif | 172 | #endif |
174 | 173 | ||
@@ -176,10 +175,10 @@ restart: | |||
176 | h2 = hash_src(src); | 175 | h2 = hash_src(src); |
177 | 176 | ||
178 | for (s = sht[h1]; s; s = s->next) { | 177 | for (s = sht[h1]; s; s = s->next) { |
179 | if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && | 178 | if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] && |
180 | protocol == s->protocol && | 179 | protocol == s->protocol && |
181 | !(s->dpi.mask & | 180 | !(s->dpi.mask & |
182 | (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) && | 181 | (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) && |
183 | #if RSVP_DST_LEN == 4 | 182 | #if RSVP_DST_LEN == 4 |
184 | dst[0] == s->dst[0] && | 183 | dst[0] == s->dst[0] && |
185 | dst[1] == s->dst[1] && | 184 | dst[1] == s->dst[1] && |
@@ -188,8 +187,8 @@ restart: | |||
188 | tunnelid == s->tunnelid) { | 187 | tunnelid == s->tunnelid) { |
189 | 188 | ||
190 | for (f = s->ht[h2]; f; f = f->next) { | 189 | for (f = s->ht[h2]; f; f = f->next) { |
191 | if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] && | 190 | if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] && |
192 | !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key)) | 191 | !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key)) |
193 | #if RSVP_DST_LEN == 4 | 192 | #if RSVP_DST_LEN == 4 |
194 | && | 193 | && |
195 | src[0] == f->src[0] && | 194 | src[0] == f->src[0] && |
@@ -205,7 +204,7 @@ matched: | |||
205 | return 0; | 204 | return 0; |
206 | 205 | ||
207 | tunnelid = f->res.classid; | 206 | tunnelid = f->res.classid; |
208 | nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr)); | 207 | nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr)); |
209 | goto restart; | 208 | goto restart; |
210 | } | 209 | } |
211 | } | 210 | } |
@@ -224,11 +223,11 @@ matched: | |||
224 | 223 | ||
225 | static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) | 224 | static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) |
226 | { | 225 | { |
227 | struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; | 226 | struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; |
228 | struct rsvp_session *s; | 227 | struct rsvp_session *s; |
229 | struct rsvp_filter *f; | 228 | struct rsvp_filter *f; |
230 | unsigned h1 = handle&0xFF; | 229 | unsigned int h1 = handle & 0xFF; |
231 | unsigned h2 = (handle>>8)&0xFF; | 230 | unsigned int h2 = (handle >> 8) & 0xFF; |
232 | 231 | ||
233 | if (h2 > 16) | 232 | if (h2 > 16) |
234 | return 0; | 233 | return 0; |
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp) | |||
258 | return -ENOBUFS; | 257 | return -ENOBUFS; |
259 | } | 258 | } |
260 | 259 | ||
261 | static inline void | 260 | static void |
262 | rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) | 261 | rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) |
263 | { | 262 | { |
264 | tcf_unbind_filter(tp, &f->res); | 263 | tcf_unbind_filter(tp, &f->res); |
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp) | |||
277 | 276 | ||
278 | sht = data->ht; | 277 | sht = data->ht; |
279 | 278 | ||
280 | for (h1=0; h1<256; h1++) { | 279 | for (h1 = 0; h1 < 256; h1++) { |
281 | struct rsvp_session *s; | 280 | struct rsvp_session *s; |
282 | 281 | ||
283 | while ((s = sht[h1]) != NULL) { | 282 | while ((s = sht[h1]) != NULL) { |
284 | sht[h1] = s->next; | 283 | sht[h1] = s->next; |
285 | 284 | ||
286 | for (h2=0; h2<=16; h2++) { | 285 | for (h2 = 0; h2 <= 16; h2++) { |
287 | struct rsvp_filter *f; | 286 | struct rsvp_filter *f; |
288 | 287 | ||
289 | while ((f = s->ht[h2]) != NULL) { | 288 | while ((f = s->ht[h2]) != NULL) { |
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp) | |||
299 | 298 | ||
300 | static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) | 299 | static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) |
301 | { | 300 | { |
302 | struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg; | 301 | struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg; |
303 | unsigned h = f->handle; | 302 | unsigned int h = f->handle; |
304 | struct rsvp_session **sp; | 303 | struct rsvp_session **sp; |
305 | struct rsvp_session *s = f->sess; | 304 | struct rsvp_session *s = f->sess; |
306 | int i; | 305 | int i; |
307 | 306 | ||
308 | for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) { | 307 | for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) { |
309 | if (*fp == f) { | 308 | if (*fp == f) { |
310 | tcf_tree_lock(tp); | 309 | tcf_tree_lock(tp); |
311 | *fp = f->next; | 310 | *fp = f->next; |
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) | |||
314 | 313 | ||
315 | /* Strip tree */ | 314 | /* Strip tree */ |
316 | 315 | ||
317 | for (i=0; i<=16; i++) | 316 | for (i = 0; i <= 16; i++) |
318 | if (s->ht[i]) | 317 | if (s->ht[i]) |
319 | return 0; | 318 | return 0; |
320 | 319 | ||
321 | /* OK, session has no flows */ | 320 | /* OK, session has no flows */ |
322 | for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF]; | 321 | for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF]; |
323 | *sp; sp = &(*sp)->next) { | 322 | *sp; sp = &(*sp)->next) { |
324 | if (*sp == s) { | 323 | if (*sp == s) { |
325 | tcf_tree_lock(tp); | 324 | tcf_tree_lock(tp); |
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) | |||
337 | return 0; | 336 | return 0; |
338 | } | 337 | } |
339 | 338 | ||
340 | static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) | 339 | static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) |
341 | { | 340 | { |
342 | struct rsvp_head *data = tp->root; | 341 | struct rsvp_head *data = tp->root; |
343 | int i = 0xFFFF; | 342 | int i = 0xFFFF; |
344 | 343 | ||
345 | while (i-- > 0) { | 344 | while (i-- > 0) { |
346 | u32 h; | 345 | u32 h; |
346 | |||
347 | if ((data->hgenerator += 0x10000) == 0) | 347 | if ((data->hgenerator += 0x10000) == 0) |
348 | data->hgenerator = 0x10000; | 348 | data->hgenerator = 0x10000; |
349 | h = data->hgenerator|salt; | 349 | h = data->hgenerator|salt; |
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) | |||
355 | 355 | ||
356 | static int tunnel_bts(struct rsvp_head *data) | 356 | static int tunnel_bts(struct rsvp_head *data) |
357 | { | 357 | { |
358 | int n = data->tgenerator>>5; | 358 | int n = data->tgenerator >> 5; |
359 | u32 b = 1<<(data->tgenerator&0x1F); | 359 | u32 b = 1 << (data->tgenerator & 0x1F); |
360 | 360 | ||
361 | if (data->tmap[n]&b) | 361 | if (data->tmap[n] & b) |
362 | return 0; | 362 | return 0; |
363 | data->tmap[n] |= b; | 363 | data->tmap[n] |= b; |
364 | return 1; | 364 | return 1; |
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data) | |||
372 | 372 | ||
373 | memset(tmap, 0, sizeof(tmap)); | 373 | memset(tmap, 0, sizeof(tmap)); |
374 | 374 | ||
375 | for (h1=0; h1<256; h1++) { | 375 | for (h1 = 0; h1 < 256; h1++) { |
376 | struct rsvp_session *s; | 376 | struct rsvp_session *s; |
377 | for (s = sht[h1]; s; s = s->next) { | 377 | for (s = sht[h1]; s; s = s->next) { |
378 | for (h2=0; h2<=16; h2++) { | 378 | for (h2 = 0; h2 <= 16; h2++) { |
379 | struct rsvp_filter *f; | 379 | struct rsvp_filter *f; |
380 | 380 | ||
381 | for (f = s->ht[h2]; f; f = f->next) { | 381 | for (f = s->ht[h2]; f; f = f->next) { |
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data) | |||
395 | { | 395 | { |
396 | int i, k; | 396 | int i, k; |
397 | 397 | ||
398 | for (k=0; k<2; k++) { | 398 | for (k = 0; k < 2; k++) { |
399 | for (i=255; i>0; i--) { | 399 | for (i = 255; i > 0; i--) { |
400 | if (++data->tgenerator == 0) | 400 | if (++data->tgenerator == 0) |
401 | data->tgenerator = 1; | 401 | data->tgenerator = 1; |
402 | if (tunnel_bts(data)) | 402 | if (tunnel_bts(data)) |
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, | |||
428 | struct nlattr *opt = tca[TCA_OPTIONS-1]; | 428 | struct nlattr *opt = tca[TCA_OPTIONS-1]; |
429 | struct nlattr *tb[TCA_RSVP_MAX + 1]; | 429 | struct nlattr *tb[TCA_RSVP_MAX + 1]; |
430 | struct tcf_exts e; | 430 | struct tcf_exts e; |
431 | unsigned h1, h2; | 431 | unsigned int h1, h2; |
432 | __be32 *dst; | 432 | __be32 *dst; |
433 | int err; | 433 | int err; |
434 | 434 | ||
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, | |||
443 | if (err < 0) | 443 | if (err < 0) |
444 | return err; | 444 | return err; |
445 | 445 | ||
446 | if ((f = (struct rsvp_filter*)*arg) != NULL) { | 446 | f = (struct rsvp_filter *)*arg; |
447 | if (f) { | ||
447 | /* Node exists: adjust only classid */ | 448 | /* Node exists: adjust only classid */ |
448 | 449 | ||
449 | if (f->handle != handle && handle) | 450 | if (f->handle != handle && handle) |
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, | |||
500 | goto errout; | 501 | goto errout; |
501 | } | 502 | } |
502 | 503 | ||
503 | for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) { | 504 | for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) { |
504 | if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && | 505 | if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && |
505 | pinfo && pinfo->protocol == s->protocol && | 506 | pinfo && pinfo->protocol == s->protocol && |
506 | memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && | 507 | memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && |
@@ -523,7 +524,7 @@ insert: | |||
523 | tcf_exts_change(tp, &f->exts, &e); | 524 | tcf_exts_change(tp, &f->exts, &e); |
524 | 525 | ||
525 | for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) | 526 | for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) |
526 | if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask) | 527 | if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask) |
527 | break; | 528 | break; |
528 | f->next = *fp; | 529 | f->next = *fp; |
529 | wmb(); | 530 | wmb(); |
@@ -567,7 +568,7 @@ errout2: | |||
567 | static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) | 568 | static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) |
568 | { | 569 | { |
569 | struct rsvp_head *head = tp->root; | 570 | struct rsvp_head *head = tp->root; |
570 | unsigned h, h1; | 571 | unsigned int h, h1; |
571 | 572 | ||
572 | if (arg->stop) | 573 | if (arg->stop) |
573 | return; | 574 | return; |
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) | |||
598 | static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, | 599 | static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, |
599 | struct sk_buff *skb, struct tcmsg *t) | 600 | struct sk_buff *skb, struct tcmsg *t) |
600 | { | 601 | { |
601 | struct rsvp_filter *f = (struct rsvp_filter*)fh; | 602 | struct rsvp_filter *f = (struct rsvp_filter *)fh; |
602 | struct rsvp_session *s; | 603 | struct rsvp_session *s; |
603 | unsigned char *b = skb_tail_pointer(skb); | 604 | unsigned char *b = skb_tail_pointer(skb); |
604 | struct nlattr *nest; | 605 | struct nlattr *nest; |
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, | |||
624 | NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); | 625 | NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); |
625 | if (f->res.classid) | 626 | if (f->res.classid) |
626 | NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); | 627 | NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); |
627 | if (((f->handle>>8)&0xFF) != 16) | 628 | if (((f->handle >> 8) & 0xFF) != 16) |
628 | NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); | 629 | NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); |
629 | 630 | ||
630 | if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) | 631 | if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) |
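The cls_rsvp.h hunks mostly swap __inline__ for inline, spell bare "unsigned" as "unsigned int", and put spaces around shift operators. A small sketch of the target style with the hash reduced to plain C (the kernel version also folds the protocol and tunnel id into the result):

    #include <stdint.h>

    /* Old: "__inline__ unsigned", shifts written without spaces. */
    static __inline__ unsigned hash_dst_old(uint32_t dst)
    {
            unsigned h = dst;
            h ^= h>>16;
            h ^= h>>8;
            return h & 0xFF;
    }

    /* New: "static inline unsigned int", spaced operators and a blank
     * line after the local declarations.
     */
    static inline unsigned int hash_dst_new(uint32_t dst)
    {
            unsigned int h = dst;

            h ^= h >> 16;
            h ^= h >> 8;
            return h & 0xFF;
    }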
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 20ef330bb918..36667fa64237 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
249 | * of the hashing index is below the threshold. | 249 | * of the hashing index is below the threshold. |
250 | */ | 250 | */ |
251 | if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) | 251 | if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) |
252 | cp.hash = (cp.mask >> cp.shift)+1; | 252 | cp.hash = (cp.mask >> cp.shift) + 1; |
253 | else | 253 | else |
254 | cp.hash = DEFAULT_HASH_SIZE; | 254 | cp.hash = DEFAULT_HASH_SIZE; |
255 | } | 255 | } |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index b0c2a82178af..966920c14e7a 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -42,8 +42,7 @@ | |||
42 | #include <net/act_api.h> | 42 | #include <net/act_api.h> |
43 | #include <net/pkt_cls.h> | 43 | #include <net/pkt_cls.h> |
44 | 44 | ||
45 | struct tc_u_knode | 45 | struct tc_u_knode { |
46 | { | ||
47 | struct tc_u_knode *next; | 46 | struct tc_u_knode *next; |
48 | u32 handle; | 47 | u32 handle; |
49 | struct tc_u_hnode *ht_up; | 48 | struct tc_u_hnode *ht_up; |
@@ -63,19 +62,17 @@ struct tc_u_knode | |||
63 | struct tc_u32_sel sel; | 62 | struct tc_u32_sel sel; |
64 | }; | 63 | }; |
65 | 64 | ||
66 | struct tc_u_hnode | 65 | struct tc_u_hnode { |
67 | { | ||
68 | struct tc_u_hnode *next; | 66 | struct tc_u_hnode *next; |
69 | u32 handle; | 67 | u32 handle; |
70 | u32 prio; | 68 | u32 prio; |
71 | struct tc_u_common *tp_c; | 69 | struct tc_u_common *tp_c; |
72 | int refcnt; | 70 | int refcnt; |
73 | unsigned divisor; | 71 | unsigned int divisor; |
74 | struct tc_u_knode *ht[1]; | 72 | struct tc_u_knode *ht[1]; |
75 | }; | 73 | }; |
76 | 74 | ||
77 | struct tc_u_common | 75 | struct tc_u_common { |
78 | { | ||
79 | struct tc_u_hnode *hlist; | 76 | struct tc_u_hnode *hlist; |
80 | struct Qdisc *q; | 77 | struct Qdisc *q; |
81 | int refcnt; | 78 | int refcnt; |
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = { | |||
87 | .police = TCA_U32_POLICE | 84 | .police = TCA_U32_POLICE |
88 | }; | 85 | }; |
89 | 86 | ||
90 | static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) | 87 | static inline unsigned int u32_hash_fold(__be32 key, |
88 | const struct tc_u32_sel *sel, | ||
89 | u8 fshift) | ||
91 | { | 90 | { |
92 | unsigned h = ntohl(key & sel->hmask)>>fshift; | 91 | unsigned int h = ntohl(key & sel->hmask) >> fshift; |
93 | 92 | ||
94 | return h; | 93 | return h; |
95 | } | 94 | } |
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re | |||
101 | unsigned int off; | 100 | unsigned int off; |
102 | } stack[TC_U32_MAXDEPTH]; | 101 | } stack[TC_U32_MAXDEPTH]; |
103 | 102 | ||
104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | 103 | struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root; |
105 | unsigned int off = skb_network_offset(skb); | 104 | unsigned int off = skb_network_offset(skb); |
106 | struct tc_u_knode *n; | 105 | struct tc_u_knode *n; |
107 | int sdepth = 0; | 106 | int sdepth = 0; |
@@ -120,7 +119,7 @@ next_knode: | |||
120 | struct tc_u32_key *key = n->sel.keys; | 119 | struct tc_u32_key *key = n->sel.keys; |
121 | 120 | ||
122 | #ifdef CONFIG_CLS_U32_PERF | 121 | #ifdef CONFIG_CLS_U32_PERF |
123 | n->pf->rcnt +=1; | 122 | n->pf->rcnt += 1; |
124 | j = 0; | 123 | j = 0; |
125 | #endif | 124 | #endif |
126 | 125 | ||
@@ -133,7 +132,7 @@ next_knode: | |||
133 | } | 132 | } |
134 | #endif | 133 | #endif |
135 | 134 | ||
136 | for (i = n->sel.nkeys; i>0; i--, key++) { | 135 | for (i = n->sel.nkeys; i > 0; i--, key++) { |
137 | int toff = off + key->off + (off2 & key->offmask); | 136 | int toff = off + key->off + (off2 & key->offmask); |
138 | __be32 *data, _data; | 137 | __be32 *data, _data; |
139 | 138 | ||
@@ -148,13 +147,13 @@ next_knode: | |||
148 | goto next_knode; | 147 | goto next_knode; |
149 | } | 148 | } |
150 | #ifdef CONFIG_CLS_U32_PERF | 149 | #ifdef CONFIG_CLS_U32_PERF |
151 | n->pf->kcnts[j] +=1; | 150 | n->pf->kcnts[j] += 1; |
152 | j++; | 151 | j++; |
153 | #endif | 152 | #endif |
154 | } | 153 | } |
155 | if (n->ht_down == NULL) { | 154 | if (n->ht_down == NULL) { |
156 | check_terminal: | 155 | check_terminal: |
157 | if (n->sel.flags&TC_U32_TERMINAL) { | 156 | if (n->sel.flags & TC_U32_TERMINAL) { |
158 | 157 | ||
159 | *res = n->res; | 158 | *res = n->res; |
160 | #ifdef CONFIG_NET_CLS_IND | 159 | #ifdef CONFIG_NET_CLS_IND |
@@ -164,7 +163,7 @@ check_terminal: | |||
164 | } | 163 | } |
165 | #endif | 164 | #endif |
166 | #ifdef CONFIG_CLS_U32_PERF | 165 | #ifdef CONFIG_CLS_U32_PERF |
167 | n->pf->rhit +=1; | 166 | n->pf->rhit += 1; |
168 | #endif | 167 | #endif |
169 | r = tcf_exts_exec(skb, &n->exts, res); | 168 | r = tcf_exts_exec(skb, &n->exts, res); |
170 | if (r < 0) { | 169 | if (r < 0) { |
@@ -197,10 +196,10 @@ check_terminal: | |||
197 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, | 196 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, |
198 | n->fshift); | 197 | n->fshift); |
199 | } | 198 | } |
200 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | 199 | if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT))) |
201 | goto next_ht; | 200 | goto next_ht; |
202 | 201 | ||
203 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | 202 | if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) { |
204 | off2 = n->sel.off + 3; | 203 | off2 = n->sel.off + 3; |
205 | if (n->sel.flags & TC_U32_VAROFFSET) { | 204 | if (n->sel.flags & TC_U32_VAROFFSET) { |
206 | __be16 *data, _data; | 205 | __be16 *data, _data; |
@@ -215,7 +214,7 @@ check_terminal: | |||
215 | } | 214 | } |
216 | off2 &= ~3; | 215 | off2 &= ~3; |
217 | } | 216 | } |
218 | if (n->sel.flags&TC_U32_EAT) { | 217 | if (n->sel.flags & TC_U32_EAT) { |
219 | off += off2; | 218 | off += off2; |
220 | off2 = 0; | 219 | off2 = 0; |
221 | } | 220 | } |
@@ -236,11 +235,11 @@ out: | |||
236 | 235 | ||
237 | deadloop: | 236 | deadloop: |
238 | if (net_ratelimit()) | 237 | if (net_ratelimit()) |
239 | printk(KERN_WARNING "cls_u32: dead loop\n"); | 238 | pr_warning("cls_u32: dead loop\n"); |
240 | return -1; | 239 | return -1; |
241 | } | 240 | } |
242 | 241 | ||
243 | static __inline__ struct tc_u_hnode * | 242 | static struct tc_u_hnode * |
244 | u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) | 243 | u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) |
245 | { | 244 | { |
246 | struct tc_u_hnode *ht; | 245 | struct tc_u_hnode *ht; |
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) | |||
252 | return ht; | 251 | return ht; |
253 | } | 252 | } |
254 | 253 | ||
255 | static __inline__ struct tc_u_knode * | 254 | static struct tc_u_knode * |
256 | u32_lookup_key(struct tc_u_hnode *ht, u32 handle) | 255 | u32_lookup_key(struct tc_u_hnode *ht, u32 handle) |
257 | { | 256 | { |
258 | unsigned sel; | 257 | unsigned int sel; |
259 | struct tc_u_knode *n = NULL; | 258 | struct tc_u_knode *n = NULL; |
260 | 259 | ||
261 | sel = TC_U32_HASH(handle); | 260 | sel = TC_U32_HASH(handle); |
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c) | |||
300 | do { | 299 | do { |
301 | if (++tp_c->hgenerator == 0x7FF) | 300 | if (++tp_c->hgenerator == 0x7FF) |
302 | tp_c->hgenerator = 1; | 301 | tp_c->hgenerator = 1; |
303 | } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); | 302 | } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); |
304 | 303 | ||
305 | return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; | 304 | return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; |
306 | } | 305 | } |
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key) | |||
378 | static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) | 377 | static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) |
379 | { | 378 | { |
380 | struct tc_u_knode *n; | 379 | struct tc_u_knode *n; |
381 | unsigned h; | 380 | unsigned int h; |
382 | 381 | ||
383 | for (h=0; h<=ht->divisor; h++) { | 382 | for (h = 0; h <= ht->divisor; h++) { |
384 | while ((n = ht->ht[h]) != NULL) { | 383 | while ((n = ht->ht[h]) != NULL) { |
385 | ht->ht[h] = n->next; | 384 | ht->ht[h] = n->next; |
386 | 385 | ||
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp) | |||
446 | 445 | ||
447 | static int u32_delete(struct tcf_proto *tp, unsigned long arg) | 446 | static int u32_delete(struct tcf_proto *tp, unsigned long arg) |
448 | { | 447 | { |
449 | struct tc_u_hnode *ht = (struct tc_u_hnode*)arg; | 448 | struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; |
450 | 449 | ||
451 | if (ht == NULL) | 450 | if (ht == NULL) |
452 | return 0; | 451 | return 0; |
453 | 452 | ||
454 | if (TC_U32_KEY(ht->handle)) | 453 | if (TC_U32_KEY(ht->handle)) |
455 | return u32_delete_key(tp, (struct tc_u_knode*)ht); | 454 | return u32_delete_key(tp, (struct tc_u_knode *)ht); |
456 | 455 | ||
457 | if (tp->root == ht) | 456 | if (tp->root == ht) |
458 | return -EINVAL; | 457 | return -EINVAL; |
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) | |||
470 | static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) | 469 | static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) |
471 | { | 470 | { |
472 | struct tc_u_knode *n; | 471 | struct tc_u_knode *n; |
473 | unsigned i = 0x7FF; | 472 | unsigned int i = 0x7FF; |
474 | 473 | ||
475 | for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next) | 474 | for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) |
476 | if (i < TC_U32_NODE(n->handle)) | 475 | if (i < TC_U32_NODE(n->handle)) |
477 | i = TC_U32_NODE(n->handle); | 476 | i = TC_U32_NODE(n->handle); |
478 | i++; | 477 | i++; |
479 | 478 | ||
480 | return handle|(i>0xFFF ? 0xFFF : i); | 479 | return handle | (i > 0xFFF ? 0xFFF : i); |
481 | } | 480 | } |
482 | 481 | ||
483 | static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { | 482 | static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { |
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
566 | if (err < 0) | 565 | if (err < 0) |
567 | return err; | 566 | return err; |
568 | 567 | ||
569 | if ((n = (struct tc_u_knode*)*arg) != NULL) { | 568 | n = (struct tc_u_knode *)*arg; |
569 | if (n) { | ||
570 | if (TC_U32_KEY(n->handle) == 0) | 570 | if (TC_U32_KEY(n->handle) == 0) |
571 | return -EINVAL; | 571 | return -EINVAL; |
572 | 572 | ||
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
574 | } | 574 | } |
575 | 575 | ||
576 | if (tb[TCA_U32_DIVISOR]) { | 576 | if (tb[TCA_U32_DIVISOR]) { |
577 | unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); | 577 | unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); |
578 | 578 | ||
579 | if (--divisor > 0x100) | 579 | if (--divisor > 0x100) |
580 | return -EINVAL; | 580 | return -EINVAL; |
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
585 | if (handle == 0) | 585 | if (handle == 0) |
586 | return -ENOMEM; | 586 | return -ENOMEM; |
587 | } | 587 | } |
588 | ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); | 588 | ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL); |
589 | if (ht == NULL) | 589 | if (ht == NULL) |
590 | return -ENOBUFS; | 590 | return -ENOBUFS; |
591 | ht->tp_c = tp_c; | 591 | ht->tp_c = tp_c; |
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) | |||
683 | struct tc_u_common *tp_c = tp->data; | 683 | struct tc_u_common *tp_c = tp->data; |
684 | struct tc_u_hnode *ht; | 684 | struct tc_u_hnode *ht; |
685 | struct tc_u_knode *n; | 685 | struct tc_u_knode *n; |
686 | unsigned h; | 686 | unsigned int h; |
687 | 687 | ||
688 | if (arg->stop) | 688 | if (arg->stop) |
689 | return; | 689 | return; |
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) | |||
717 | static int u32_dump(struct tcf_proto *tp, unsigned long fh, | 717 | static int u32_dump(struct tcf_proto *tp, unsigned long fh, |
718 | struct sk_buff *skb, struct tcmsg *t) | 718 | struct sk_buff *skb, struct tcmsg *t) |
719 | { | 719 | { |
720 | struct tc_u_knode *n = (struct tc_u_knode*)fh; | 720 | struct tc_u_knode *n = (struct tc_u_knode *)fh; |
721 | struct nlattr *nest; | 721 | struct nlattr *nest; |
722 | 722 | ||
723 | if (n == NULL) | 723 | if (n == NULL) |
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, | |||
730 | goto nla_put_failure; | 730 | goto nla_put_failure; |
731 | 731 | ||
732 | if (TC_U32_KEY(n->handle) == 0) { | 732 | if (TC_U32_KEY(n->handle) == 0) { |
733 | struct tc_u_hnode *ht = (struct tc_u_hnode*)fh; | 733 | struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; |
734 | u32 divisor = ht->divisor+1; | 734 | u32 divisor = ht->divisor + 1; |
735 | |||
735 | NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); | 736 | NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); |
736 | } else { | 737 | } else { |
737 | NLA_PUT(skb, TCA_U32_SEL, | 738 | NLA_PUT(skb, TCA_U32_SEL, |
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, | |||
755 | goto nla_put_failure; | 756 | goto nla_put_failure; |
756 | 757 | ||
757 | #ifdef CONFIG_NET_CLS_IND | 758 | #ifdef CONFIG_NET_CLS_IND |
758 | if(strlen(n->indev)) | 759 | if (strlen(n->indev)) |
759 | NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); | 760 | NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); |
760 | #endif | 761 | #endif |
761 | #ifdef CONFIG_CLS_U32_PERF | 762 | #ifdef CONFIG_CLS_U32_PERF |
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c index bc450397487a..1c8360a2752a 100644 --- a/net/sched/em_cmp.c +++ b/net/sched/em_cmp.c | |||
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em, | |||
33 | return 0; | 33 | return 0; |
34 | 34 | ||
35 | switch (cmp->align) { | 35 | switch (cmp->align) { |
36 | case TCF_EM_ALIGN_U8: | 36 | case TCF_EM_ALIGN_U8: |
37 | val = *ptr; | 37 | val = *ptr; |
38 | break; | 38 | break; |
39 | 39 | ||
40 | case TCF_EM_ALIGN_U16: | 40 | case TCF_EM_ALIGN_U16: |
41 | val = get_unaligned_be16(ptr); | 41 | val = get_unaligned_be16(ptr); |
42 | 42 | ||
43 | if (cmp_needs_transformation(cmp)) | 43 | if (cmp_needs_transformation(cmp)) |
44 | val = be16_to_cpu(val); | 44 | val = be16_to_cpu(val); |
45 | break; | 45 | break; |
46 | 46 | ||
47 | case TCF_EM_ALIGN_U32: | 47 | case TCF_EM_ALIGN_U32: |
48 | /* Worth checking boundries? The branching seems | 48 | /* Worth checking boundries? The branching seems |
49 | * to get worse. Visit again. */ | 49 | * to get worse. Visit again. |
50 | val = get_unaligned_be32(ptr); | 50 | */ |
51 | val = get_unaligned_be32(ptr); | ||
51 | 52 | ||
52 | if (cmp_needs_transformation(cmp)) | 53 | if (cmp_needs_transformation(cmp)) |
53 | val = be32_to_cpu(val); | 54 | val = be32_to_cpu(val); |
54 | break; | 55 | break; |
55 | 56 | ||
56 | default: | 57 | default: |
57 | return 0; | 58 | return 0; |
58 | } | 59 | } |
59 | 60 | ||
60 | if (cmp->mask) | 61 | if (cmp->mask) |
61 | val &= cmp->mask; | 62 | val &= cmp->mask; |
62 | 63 | ||
63 | switch (cmp->opnd) { | 64 | switch (cmp->opnd) { |
64 | case TCF_EM_OPND_EQ: | 65 | case TCF_EM_OPND_EQ: |
65 | return val == cmp->val; | 66 | return val == cmp->val; |
66 | case TCF_EM_OPND_LT: | 67 | case TCF_EM_OPND_LT: |
67 | return val < cmp->val; | 68 | return val < cmp->val; |
68 | case TCF_EM_OPND_GT: | 69 | case TCF_EM_OPND_GT: |
69 | return val > cmp->val; | 70 | return val > cmp->val; |
70 | } | 71 | } |
71 | 72 | ||
72 | return 0; | 73 | return 0; |
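The em_cmp.c hunk re-indents the switch bodies so the case labels sit at the same level as the switch keyword, which is the layout CodingStyle asks for. A self-contained example of that layout (operand names shortened from the TCF_EM_OPND_* constants):

    #include <stdio.h>

    enum opnd { OPND_EQ, OPND_LT, OPND_GT };

    static int compare(enum opnd op, unsigned int val, unsigned int ref)
    {
            /* case labels aligned with "switch", bodies one level deeper */
            switch (op) {
            case OPND_EQ:
                    return val == ref;
            case OPND_LT:
                    return val < ref;
            case OPND_GT:
                    return val > ref;
            }

            return 0;
    }

    int main(void)
    {
            printf("%d\n", compare(OPND_LT, 1, 2));
            return 0;
    }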
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 34da5e29ea1a..7af1f65fe678 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -73,21 +73,18 @@ | |||
73 | #include <net/pkt_cls.h> | 73 | #include <net/pkt_cls.h> |
74 | #include <net/sock.h> | 74 | #include <net/sock.h> |
75 | 75 | ||
76 | struct meta_obj | 76 | struct meta_obj { |
77 | { | ||
78 | unsigned long value; | 77 | unsigned long value; |
79 | unsigned int len; | 78 | unsigned int len; |
80 | }; | 79 | }; |
81 | 80 | ||
82 | struct meta_value | 81 | struct meta_value { |
83 | { | ||
84 | struct tcf_meta_val hdr; | 82 | struct tcf_meta_val hdr; |
85 | unsigned long val; | 83 | unsigned long val; |
86 | unsigned int len; | 84 | unsigned int len; |
87 | }; | 85 | }; |
88 | 86 | ||
89 | struct meta_match | 87 | struct meta_match { |
90 | { | ||
91 | struct meta_value lvalue; | 88 | struct meta_value lvalue; |
92 | struct meta_value rvalue; | 89 | struct meta_value rvalue; |
93 | }; | 90 | }; |
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend) | |||
483 | * Meta value collectors assignment table | 480 | * Meta value collectors assignment table |
484 | **************************************************************************/ | 481 | **************************************************************************/ |
485 | 482 | ||
486 | struct meta_ops | 483 | struct meta_ops { |
487 | { | ||
488 | void (*get)(struct sk_buff *, struct tcf_pkt_info *, | 484 | void (*get)(struct sk_buff *, struct tcf_pkt_info *, |
489 | struct meta_value *, struct meta_obj *, int *); | 485 | struct meta_value *, struct meta_obj *, int *); |
490 | }; | 486 | }; |
@@ -494,7 +490,7 @@ struct meta_ops | |||
494 | 490 | ||
495 | /* Meta value operations table listing all meta value collectors and | 491 | /* Meta value operations table listing all meta value collectors and |
496 | * assigns them to a type and meta id. */ | 492 | * assigns them to a type and meta id. */ |
497 | static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { | 493 | static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = { |
498 | [TCF_META_TYPE_VAR] = { | 494 | [TCF_META_TYPE_VAR] = { |
499 | [META_ID(DEV)] = META_FUNC(var_dev), | 495 | [META_ID(DEV)] = META_FUNC(var_dev), |
500 | [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), | 496 | [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), |
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { | |||
550 | } | 546 | } |
551 | }; | 547 | }; |
552 | 548 | ||
553 | static inline struct meta_ops * meta_ops(struct meta_value *val) | 549 | static inline struct meta_ops *meta_ops(struct meta_value *val) |
554 | { | 550 | { |
555 | return &__meta_ops[meta_type(val)][meta_id(val)]; | 551 | return &__meta_ops[meta_type(val)][meta_id(val)]; |
556 | } | 552 | } |
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) | |||
649 | { | 645 | { |
650 | if (v->len == sizeof(unsigned long)) | 646 | if (v->len == sizeof(unsigned long)) |
651 | NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); | 647 | NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); |
652 | else if (v->len == sizeof(u32)) { | 648 | else if (v->len == sizeof(u32)) |
653 | NLA_PUT_U32(skb, tlv, v->val); | 649 | NLA_PUT_U32(skb, tlv, v->val); |
654 | } | ||
655 | 650 | ||
656 | return 0; | 651 | return 0; |
657 | 652 | ||
@@ -663,8 +658,7 @@ nla_put_failure: | |||
663 | * Type specific operations table | 658 | * Type specific operations table |
664 | **************************************************************************/ | 659 | **************************************************************************/ |
665 | 660 | ||
666 | struct meta_type_ops | 661 | struct meta_type_ops { |
667 | { | ||
668 | void (*destroy)(struct meta_value *); | 662 | void (*destroy)(struct meta_value *); |
669 | int (*compare)(struct meta_obj *, struct meta_obj *); | 663 | int (*compare)(struct meta_obj *, struct meta_obj *); |
670 | int (*change)(struct meta_value *, struct nlattr *); | 664 | int (*change)(struct meta_value *, struct nlattr *); |
@@ -672,7 +666,7 @@ struct meta_type_ops | |||
672 | int (*dump)(struct sk_buff *, struct meta_value *, int); | 666 | int (*dump)(struct sk_buff *, struct meta_value *, int); |
673 | }; | 667 | }; |
674 | 668 | ||
675 | static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { | 669 | static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { |
676 | [TCF_META_TYPE_VAR] = { | 670 | [TCF_META_TYPE_VAR] = { |
677 | .destroy = meta_var_destroy, | 671 | .destroy = meta_var_destroy, |
678 | .compare = meta_var_compare, | 672 | .compare = meta_var_compare, |
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { | |||
688 | } | 682 | } |
689 | }; | 683 | }; |
690 | 684 | ||
691 | static inline struct meta_type_ops * meta_type_ops(struct meta_value *v) | 685 | static inline struct meta_type_ops *meta_type_ops(struct meta_value *v) |
692 | { | 686 | { |
693 | return &__meta_type_ops[meta_type(v)]; | 687 | return &__meta_type_ops[meta_type(v)]; |
694 | } | 688 | } |
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, | |||
713 | return err; | 707 | return err; |
714 | 708 | ||
715 | if (meta_type_ops(v)->apply_extras) | 709 | if (meta_type_ops(v)->apply_extras) |
716 | meta_type_ops(v)->apply_extras(v, dst); | 710 | meta_type_ops(v)->apply_extras(v, dst); |
717 | 711 | ||
718 | return 0; | 712 | return 0; |
719 | } | 713 | } |
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m, | |||
732 | r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value); | 726 | r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value); |
733 | 727 | ||
734 | switch (meta->lvalue.hdr.op) { | 728 | switch (meta->lvalue.hdr.op) { |
735 | case TCF_EM_OPND_EQ: | 729 | case TCF_EM_OPND_EQ: |
736 | return !r; | 730 | return !r; |
737 | case TCF_EM_OPND_LT: | 731 | case TCF_EM_OPND_LT: |
738 | return r < 0; | 732 | return r < 0; |
739 | case TCF_EM_OPND_GT: | 733 | case TCF_EM_OPND_GT: |
740 | return r > 0; | 734 | return r > 0; |
741 | } | 735 | } |
742 | 736 | ||
743 | return 0; | 737 | return 0; |
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla) | |||
771 | 765 | ||
772 | static inline int meta_is_supported(struct meta_value *val) | 766 | static inline int meta_is_supported(struct meta_value *val) |
773 | { | 767 | { |
774 | return (!meta_id(val) || meta_ops(val)->get); | 768 | return !meta_id(val) || meta_ops(val)->get; |
775 | } | 769 | } |
776 | 770 | ||
777 | static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { | 771 | static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { |
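Two small patterns repeat in the em_meta.c hunks: the asterisk in a pointer-returning declaration binds to the function name ("struct meta_ops *meta_ops(...)" rather than "struct meta_ops * meta_ops(...)"), and redundant parentheses around a return expression are dropped. A compact sketch; the ops table and ids here are made up for illustration:

    #include <stddef.h>

    struct ops { int (*get)(void); };

    static struct ops ops_table[4];

    /* Asterisk attached to the function name, no space after it. */
    static struct ops *ops_lookup(unsigned int id)
    {
            return &ops_table[id & 3];
    }

    /* "return (!id || ...);" loses its outer parentheses. */
    static int ops_supported(unsigned int id)
    {
            return !id || ops_lookup(id)->get != NULL;
    }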
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c index 1a4176aee6e5..a3bed07a008b 100644 --- a/net/sched/em_nbyte.c +++ b/net/sched/em_nbyte.c | |||
@@ -18,8 +18,7 @@ | |||
18 | #include <linux/tc_ematch/tc_em_nbyte.h> | 18 | #include <linux/tc_ematch/tc_em_nbyte.h> |
19 | #include <net/pkt_cls.h> | 19 | #include <net/pkt_cls.h> |
20 | 20 | ||
21 | struct nbyte_data | 21 | struct nbyte_data { |
22 | { | ||
23 | struct tcf_em_nbyte hdr; | 22 | struct tcf_em_nbyte hdr; |
24 | char pattern[0]; | 23 | char pattern[0]; |
25 | }; | 24 | }; |
diff --git a/net/sched/em_text.c b/net/sched/em_text.c index ea8f566e720c..15d353d2e4be 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c | |||
@@ -19,8 +19,7 @@ | |||
19 | #include <linux/tc_ematch/tc_em_text.h> | 19 | #include <linux/tc_ematch/tc_em_text.h> |
20 | #include <net/pkt_cls.h> | 20 | #include <net/pkt_cls.h> |
21 | 21 | ||
22 | struct text_match | 22 | struct text_match { |
23 | { | ||
24 | u16 from_offset; | 23 | u16 from_offset; |
25 | u16 to_offset; | 24 | u16 to_offset; |
26 | u8 from_layer; | 25 | u8 from_layer; |
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c index 953f1479f7da..797bdb88c010 100644 --- a/net/sched/em_u32.c +++ b/net/sched/em_u32.c | |||
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, | |||
35 | if (!tcf_valid_offset(skb, ptr, sizeof(u32))) | 35 | if (!tcf_valid_offset(skb, ptr, sizeof(u32))) |
36 | return 0; | 36 | return 0; |
37 | 37 | ||
38 | return !(((*(__be32*) ptr) ^ key->val) & key->mask); | 38 | return !(((*(__be32 *) ptr) ^ key->val) & key->mask); |
39 | } | 39 | } |
40 | 40 | ||
41 | static struct tcf_ematch_ops em_u32_ops = { | 41 | static struct tcf_ematch_ops em_u32_ops = { |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 5e37da961f80..88d93eb92507 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -93,7 +93,7 @@ | |||
93 | static LIST_HEAD(ematch_ops); | 93 | static LIST_HEAD(ematch_ops); |
94 | static DEFINE_RWLOCK(ematch_mod_lock); | 94 | static DEFINE_RWLOCK(ematch_mod_lock); |
95 | 95 | ||
96 | static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) | 96 | static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) |
97 | { | 97 | { |
98 | struct tcf_ematch_ops *e = NULL; | 98 | struct tcf_ematch_ops *e = NULL; |
99 | 99 | ||
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops) | |||
163 | } | 163 | } |
164 | EXPORT_SYMBOL(tcf_em_unregister); | 164 | EXPORT_SYMBOL(tcf_em_unregister); |
165 | 165 | ||
166 | static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree, | 166 | static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, |
167 | int index) | 167 | int index) |
168 | { | 168 | { |
169 | return &tree->matches[index]; | 169 | return &tree->matches[index]; |
170 | } | 170 | } |
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
184 | 184 | ||
185 | if (em_hdr->kind == TCF_EM_CONTAINER) { | 185 | if (em_hdr->kind == TCF_EM_CONTAINER) { |
186 | /* Special ematch called "container", carries an index | 186 | /* Special ematch called "container", carries an index |
187 | * referencing an external ematch sequence. */ | 187 | * referencing an external ematch sequence. |
188 | */ | ||
188 | u32 ref; | 189 | u32 ref; |
189 | 190 | ||
190 | if (data_len < sizeof(ref)) | 191 | if (data_len < sizeof(ref)) |
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
195 | goto errout; | 196 | goto errout; |
196 | 197 | ||
197 | /* We do not allow backward jumps to avoid loops and jumps | 198 | /* We do not allow backward jumps to avoid loops and jumps |
198 | * to our own position are of course illegal. */ | 199 | * to our own position are of course illegal. |
200 | */ | ||
199 | if (ref <= idx) | 201 | if (ref <= idx) |
200 | goto errout; | 202 | goto errout; |
201 | 203 | ||
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
208 | * which automatically releases the reference again, therefore | 210 | * which automatically releases the reference again, therefore |
209 | * the module MUST not be given back under any circumstances | 211 | * the module MUST not be given back under any circumstances |
210 | * here. Be aware, the destroy function assumes that the | 212 | * here. Be aware, the destroy function assumes that the |
211 | * module is held if the ops field is non zero. */ | 213 | * module is held if the ops field is non zero. |
214 | */ | ||
212 | em->ops = tcf_em_lookup(em_hdr->kind); | 215 | em->ops = tcf_em_lookup(em_hdr->kind); |
213 | 216 | ||
214 | if (em->ops == NULL) { | 217 | if (em->ops == NULL) { |
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
221 | if (em->ops) { | 224 | if (em->ops) { |
222 | /* We dropped the RTNL mutex in order to | 225 | /* We dropped the RTNL mutex in order to |
223 | * perform the module load. Tell the caller | 226 | * perform the module load. Tell the caller |
224 | * to replay the request. */ | 227 | * to replay the request. |
228 | */ | ||
225 | module_put(em->ops->owner); | 229 | module_put(em->ops->owner); |
226 | err = -EAGAIN; | 230 | err = -EAGAIN; |
227 | } | 231 | } |
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
230 | } | 234 | } |
231 | 235 | ||
232 | /* ematch module provides expected length of data, so we | 236 | /* ematch module provides expected length of data, so we |
233 | * can do a basic sanity check. */ | 237 | * can do a basic sanity check. |
238 | */ | ||
234 | if (em->ops->datalen && data_len < em->ops->datalen) | 239 | if (em->ops->datalen && data_len < em->ops->datalen) |
235 | goto errout; | 240 | goto errout; |
236 | 241 | ||
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
246 | * TCF_EM_SIMPLE may be specified stating that the | 251 | * TCF_EM_SIMPLE may be specified stating that the |
247 | * data only consists of a u32 integer and the module | 252 | * data only consists of a u32 integer and the module |
248 | * does not expected a memory reference but rather | 253 | * does not expected a memory reference but rather |
249 | * the value carried. */ | 254 | * the value carried. |
255 | */ | ||
250 | if (em_hdr->flags & TCF_EM_SIMPLE) { | 256 | if (em_hdr->flags & TCF_EM_SIMPLE) { |
251 | if (data_len < sizeof(u32)) | 257 | if (data_len < sizeof(u32)) |
252 | goto errout; | 258 | goto errout; |
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, | |||
334 | * The array of rt attributes is parsed in the order as they are | 340 | * The array of rt attributes is parsed in the order as they are |
335 | * provided, their type must be incremental from 1 to n. Even | 341 | * provided, their type must be incremental from 1 to n. Even |
336 | * if it does not serve any real purpose, a failure of sticking | 342 | * if it does not serve any real purpose, a failure of sticking |
337 | * to this policy will result in parsing failure. */ | 343 | * to this policy will result in parsing failure. |
344 | */ | ||
338 | for (idx = 0; nla_ok(rt_match, list_len); idx++) { | 345 | for (idx = 0; nla_ok(rt_match, list_len); idx++) { |
339 | err = -EINVAL; | 346 | err = -EINVAL; |
340 | 347 | ||
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, | |||
359 | /* Check if the number of matches provided by userspace actually | 366 | /* Check if the number of matches provided by userspace actually |
360 | * complies with the array of matches. The number was used for | 367 | * complies with the array of matches. The number was used for |
361 | * the validation of references and a mismatch could lead to | 368 | * the validation of references and a mismatch could lead to |
362 | * undefined references during the matching process. */ | 369 | * undefined references during the matching process. |
370 | */ | ||
363 | if (idx != tree_hdr->nmatches) { | 371 | if (idx != tree_hdr->nmatches) { |
364 | err = -EINVAL; | 372 | err = -EINVAL; |
365 | goto errout_abort; | 373 | goto errout_abort; |
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) | |||
449 | .flags = em->flags | 457 | .flags = em->flags |
450 | }; | 458 | }; |
451 | 459 | ||
452 | NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr); | 460 | NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); |
453 | 461 | ||
454 | if (em->ops && em->ops->dump) { | 462 | if (em->ops && em->ops->dump) { |
455 | if (em->ops->dump(skb, em) < 0) | 463 | if (em->ops->dump(skb, em) < 0) |
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, | |||
478 | struct tcf_pkt_info *info) | 486 | struct tcf_pkt_info *info) |
479 | { | 487 | { |
480 | int r = em->ops->match(skb, em, info); | 488 | int r = em->ops->match(skb, em, info); |
489 | |||
481 | return tcf_em_is_inverted(em) ? !r : r; | 490 | return tcf_em_is_inverted(em) ? !r : r; |
482 | } | 491 | } |
483 | 492 | ||
@@ -527,8 +536,8 @@ pop_stack: | |||
527 | 536 | ||
528 | stack_overflow: | 537 | stack_overflow: |
529 | if (net_ratelimit()) | 538 | if (net_ratelimit()) |
530 | printk(KERN_WARNING "tc ematch: local stack overflow," | 539 | pr_warning("tc ematch: local stack overflow," |
531 | " increase NET_EMATCH_STACK\n"); | 540 | " increase NET_EMATCH_STACK\n"); |
532 | return -1; | 541 | return -1; |
533 | } | 542 | } |
534 | EXPORT_SYMBOL(__tcf_em_tree_match); | 543 | EXPORT_SYMBOL(__tcf_em_tree_match); |
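Most of the ematch.c changes move the closing */ of multi-line comments onto its own line, the form now preferred in net/ code. A trivial illustration of the two comment shapes:

    #include <stdio.h>

    int main(void)
    {
            /* Old style: the text and the closing marker
             * share the last line. */

            /* New style: the closing marker gets a line of its own,
             * which is what these hunks convert to.
             */
            printf("comment style example\n");
            return 0;
    }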
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index b22ca2d1cebc..36ac0ec81ce0 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops) | |||
187 | int err = -ENOENT; | 187 | int err = -ENOENT; |
188 | 188 | ||
189 | write_lock(&qdisc_mod_lock); | 189 | write_lock(&qdisc_mod_lock); |
190 | for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) | 190 | for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) |
191 | if (q == qops) | 191 | if (q == qops) |
192 | break; | 192 | break; |
193 | if (q) { | 193 | if (q) { |
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) | |||
321 | if (!tab || --tab->refcnt) | 321 | if (!tab || --tab->refcnt) |
322 | return; | 322 | return; |
323 | 323 | ||
324 | for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) { | 324 | for (rtabp = &qdisc_rtab_list; |
325 | (rtab = *rtabp) != NULL; | ||
326 | rtabp = &rtab->next) { | ||
325 | if (rtab == tab) { | 327 | if (rtab == tab) { |
326 | *rtabp = rtab->next; | 328 | *rtabp = rtab->next; |
327 | kfree(rtab); | 329 | kfree(rtab); |
@@ -459,9 +461,8 @@ EXPORT_SYMBOL(qdisc_calculate_pkt_len); | |||
459 | void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) | 461 | void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) |
460 | { | 462 | { |
461 | if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { | 463 | if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { |
462 | printk(KERN_WARNING | 464 | pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", |
463 | "%s: %s qdisc %X: is non-work-conserving?\n", | 465 | txt, qdisc->ops->id, qdisc->handle >> 16); |
464 | txt, qdisc->ops->id, qdisc->handle >> 16); | ||
465 | qdisc->flags |= TCQ_F_WARN_NONWC; | 466 | qdisc->flags |= TCQ_F_WARN_NONWC; |
466 | } | 467 | } |
467 | } | 468 | } |
@@ -625,7 +626,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev) | |||
625 | autohandle = TC_H_MAKE(0x80000000U, 0); | 626 | autohandle = TC_H_MAKE(0x80000000U, 0); |
626 | } while (qdisc_lookup(dev, autohandle) && --i > 0); | 627 | } while (qdisc_lookup(dev, autohandle) && --i > 0); |
627 | 628 | ||
628 | return i>0 ? autohandle : 0; | 629 | return i > 0 ? autohandle : 0; |
629 | } | 630 | } |
630 | 631 | ||
631 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | 632 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) |
@@ -915,9 +916,8 @@ out: | |||
915 | return 0; | 916 | return 0; |
916 | } | 917 | } |
917 | 918 | ||
918 | struct check_loop_arg | 919 | struct check_loop_arg { |
919 | { | 920 | struct qdisc_walker w; |
920 | struct qdisc_walker w; | ||
921 | struct Qdisc *p; | 921 | struct Qdisc *p; |
922 | int depth; | 922 | int depth; |
923 | }; | 923 | }; |
@@ -970,7 +970,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
970 | struct Qdisc *p = NULL; | 970 | struct Qdisc *p = NULL; |
971 | int err; | 971 | int err; |
972 | 972 | ||
973 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | 973 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
974 | if (!dev) | ||
974 | return -ENODEV; | 975 | return -ENODEV; |
975 | 976 | ||
976 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 977 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -980,12 +981,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
980 | if (clid) { | 981 | if (clid) { |
981 | if (clid != TC_H_ROOT) { | 982 | if (clid != TC_H_ROOT) { |
982 | if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { | 983 | if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { |
983 | if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) | 984 | p = qdisc_lookup(dev, TC_H_MAJ(clid)); |
985 | if (!p) | ||
984 | return -ENOENT; | 986 | return -ENOENT; |
985 | q = qdisc_leaf(p, clid); | 987 | q = qdisc_leaf(p, clid); |
986 | } else { /* ingress */ | 988 | } else if (dev_ingress_queue(dev)) { |
987 | if (dev_ingress_queue(dev)) | 989 | q = dev_ingress_queue(dev)->qdisc_sleeping; |
988 | q = dev_ingress_queue(dev)->qdisc_sleeping; | ||
989 | } | 990 | } |
990 | } else { | 991 | } else { |
991 | q = dev->qdisc; | 992 | q = dev->qdisc; |
@@ -996,7 +997,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
996 | if (tcm->tcm_handle && q->handle != tcm->tcm_handle) | 997 | if (tcm->tcm_handle && q->handle != tcm->tcm_handle) |
997 | return -EINVAL; | 998 | return -EINVAL; |
998 | } else { | 999 | } else { |
999 | if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) | 1000 | q = qdisc_lookup(dev, tcm->tcm_handle); |
1001 | if (!q) | ||
1000 | return -ENOENT; | 1002 | return -ENOENT; |
1001 | } | 1003 | } |
1002 | 1004 | ||
@@ -1008,7 +1010,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1008 | return -EINVAL; | 1010 | return -EINVAL; |
1009 | if (q->handle == 0) | 1011 | if (q->handle == 0) |
1010 | return -ENOENT; | 1012 | return -ENOENT; |
1011 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) | 1013 | err = qdisc_graft(dev, p, skb, n, clid, NULL, q); |
1014 | if (err != 0) | ||
1012 | return err; | 1015 | return err; |
1013 | } else { | 1016 | } else { |
1014 | qdisc_notify(net, skb, n, clid, NULL, q); | 1017 | qdisc_notify(net, skb, n, clid, NULL, q); |
@@ -1017,7 +1020,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1017 | } | 1020 | } |
1018 | 1021 | ||
1019 | /* | 1022 | /* |
1020 | Create/change qdisc. | 1023 | * Create/change qdisc. |
1021 | */ | 1024 | */ |
1022 | 1025 | ||
1023 | static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | 1026 | static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) |
@@ -1036,7 +1039,8 @@ replay: | |||
1036 | clid = tcm->tcm_parent; | 1039 | clid = tcm->tcm_parent; |
1037 | q = p = NULL; | 1040 | q = p = NULL; |
1038 | 1041 | ||
1039 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | 1042 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
1043 | if (!dev) | ||
1040 | return -ENODEV; | 1044 | return -ENODEV; |
1041 | 1045 | ||
1042 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1046 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1046,12 +1050,12 @@ replay: | |||
1046 | if (clid) { | 1050 | if (clid) { |
1047 | if (clid != TC_H_ROOT) { | 1051 | if (clid != TC_H_ROOT) { |
1048 | if (clid != TC_H_INGRESS) { | 1052 | if (clid != TC_H_INGRESS) { |
1049 | if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) | 1053 | p = qdisc_lookup(dev, TC_H_MAJ(clid)); |
1054 | if (!p) | ||
1050 | return -ENOENT; | 1055 | return -ENOENT; |
1051 | q = qdisc_leaf(p, clid); | 1056 | q = qdisc_leaf(p, clid); |
1052 | } else { /* ingress */ | 1057 | } else if (dev_ingress_queue_create(dev)) { |
1053 | if (dev_ingress_queue_create(dev)) | 1058 | q = dev_ingress_queue(dev)->qdisc_sleeping; |
1054 | q = dev_ingress_queue(dev)->qdisc_sleeping; | ||
1055 | } | 1059 | } |
1056 | } else { | 1060 | } else { |
1057 | q = dev->qdisc; | 1061 | q = dev->qdisc; |
@@ -1063,13 +1067,14 @@ replay: | |||
1063 | 1067 | ||
1064 | if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { | 1068 | if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { |
1065 | if (tcm->tcm_handle) { | 1069 | if (tcm->tcm_handle) { |
1066 | if (q && !(n->nlmsg_flags&NLM_F_REPLACE)) | 1070 | if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) |
1067 | return -EEXIST; | 1071 | return -EEXIST; |
1068 | if (TC_H_MIN(tcm->tcm_handle)) | 1072 | if (TC_H_MIN(tcm->tcm_handle)) |
1069 | return -EINVAL; | 1073 | return -EINVAL; |
1070 | if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) | 1074 | q = qdisc_lookup(dev, tcm->tcm_handle); |
1075 | if (!q) | ||
1071 | goto create_n_graft; | 1076 | goto create_n_graft; |
1072 | if (n->nlmsg_flags&NLM_F_EXCL) | 1077 | if (n->nlmsg_flags & NLM_F_EXCL) |
1073 | return -EEXIST; | 1078 | return -EEXIST; |
1074 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) | 1079 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
1075 | return -EINVAL; | 1080 | return -EINVAL; |
@@ -1079,7 +1084,7 @@ replay: | |||
1079 | atomic_inc(&q->refcnt); | 1084 | atomic_inc(&q->refcnt); |
1080 | goto graft; | 1085 | goto graft; |
1081 | } else { | 1086 | } else { |
1082 | if (q == NULL) | 1087 | if (!q) |
1083 | goto create_n_graft; | 1088 | goto create_n_graft; |
1084 | 1089 | ||
1085 | /* This magic test requires explanation. | 1090 | /* This magic test requires explanation. |
@@ -1101,9 +1106,9 @@ replay: | |||
1101 | * For now we select create/graft, if | 1106 | * For now we select create/graft, if |
1102 | * user gave KIND, which does not match existing. | 1107 | * user gave KIND, which does not match existing. |
1103 | */ | 1108 | */ |
1104 | if ((n->nlmsg_flags&NLM_F_CREATE) && | 1109 | if ((n->nlmsg_flags & NLM_F_CREATE) && |
1105 | (n->nlmsg_flags&NLM_F_REPLACE) && | 1110 | (n->nlmsg_flags & NLM_F_REPLACE) && |
1106 | ((n->nlmsg_flags&NLM_F_EXCL) || | 1111 | ((n->nlmsg_flags & NLM_F_EXCL) || |
1107 | (tca[TCA_KIND] && | 1112 | (tca[TCA_KIND] && |
1108 | nla_strcmp(tca[TCA_KIND], q->ops->id)))) | 1113 | nla_strcmp(tca[TCA_KIND], q->ops->id)))) |
1109 | goto create_n_graft; | 1114 | goto create_n_graft; |
@@ -1118,7 +1123,7 @@ replay: | |||
1118 | /* Change qdisc parameters */ | 1123 | /* Change qdisc parameters */ |
1119 | if (q == NULL) | 1124 | if (q == NULL) |
1120 | return -ENOENT; | 1125 | return -ENOENT; |
1121 | if (n->nlmsg_flags&NLM_F_EXCL) | 1126 | if (n->nlmsg_flags & NLM_F_EXCL) |
1122 | return -EEXIST; | 1127 | return -EEXIST; |
1123 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) | 1128 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
1124 | return -EINVAL; | 1129 | return -EINVAL; |
@@ -1128,7 +1133,7 @@ replay: | |||
1128 | return err; | 1133 | return err; |
1129 | 1134 | ||
1130 | create_n_graft: | 1135 | create_n_graft: |
1131 | if (!(n->nlmsg_flags&NLM_F_CREATE)) | 1136 | if (!(n->nlmsg_flags & NLM_F_CREATE)) |
1132 | return -ENOENT; | 1137 | return -ENOENT; |
1133 | if (clid == TC_H_INGRESS) { | 1138 | if (clid == TC_H_INGRESS) { |
1134 | if (dev_ingress_queue(dev)) | 1139 | if (dev_ingress_queue(dev)) |
@@ -1234,16 +1239,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, | |||
1234 | return -ENOBUFS; | 1239 | return -ENOBUFS; |
1235 | 1240 | ||
1236 | if (old && !tc_qdisc_dump_ignore(old)) { | 1241 | if (old && !tc_qdisc_dump_ignore(old)) { |
1237 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) | 1242 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, |
1243 | 0, RTM_DELQDISC) < 0) | ||
1238 | goto err_out; | 1244 | goto err_out; |
1239 | } | 1245 | } |
1240 | if (new && !tc_qdisc_dump_ignore(new)) { | 1246 | if (new && !tc_qdisc_dump_ignore(new)) { |
1241 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | 1247 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, |
1248 | old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | ||
1242 | goto err_out; | 1249 | goto err_out; |
1243 | } | 1250 | } |
1244 | 1251 | ||
1245 | if (skb->len) | 1252 | if (skb->len) |
1246 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1253 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
1254 | n->nlmsg_flags & NLM_F_ECHO); | ||
1247 | 1255 | ||
1248 | err_out: | 1256 | err_out: |
1249 | kfree_skb(skb); | 1257 | kfree_skb(skb); |
@@ -1275,7 +1283,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, | |||
1275 | q_idx++; | 1283 | q_idx++; |
1276 | continue; | 1284 | continue; |
1277 | } | 1285 | } |
1278 | if (!tc_qdisc_dump_ignore(q) && | 1286 | if (!tc_qdisc_dump_ignore(q) && |
1279 | tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, | 1287 | tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, |
1280 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) | 1288 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) |
1281 | goto done; | 1289 | goto done; |
@@ -1356,7 +1364,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1356 | u32 qid = TC_H_MAJ(clid); | 1364 | u32 qid = TC_H_MAJ(clid); |
1357 | int err; | 1365 | int err; |
1358 | 1366 | ||
1359 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | 1367 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
1368 | if (!dev) | ||
1360 | return -ENODEV; | 1369 | return -ENODEV; |
1361 | 1370 | ||
1362 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1371 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1391,9 +1400,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1391 | qid = dev->qdisc->handle; | 1400 | qid = dev->qdisc->handle; |
1392 | 1401 | ||
1393 | /* Now qid is genuine qdisc handle consistent | 1402 | /* Now qid is genuine qdisc handle consistent |
1394 | both with parent and child. | 1403 | * both with parent and child. |
1395 | 1404 | * | |
1396 | TC_H_MAJ(pid) still may be unspecified, complete it now. | 1405 | * TC_H_MAJ(pid) still may be unspecified, complete it now. |
1397 | */ | 1406 | */ |
1398 | if (pid) | 1407 | if (pid) |
1399 | pid = TC_H_MAKE(qid, pid); | 1408 | pid = TC_H_MAKE(qid, pid); |
@@ -1403,7 +1412,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1403 | } | 1412 | } |
1404 | 1413 | ||
1405 | /* OK. Locate qdisc */ | 1414 | /* OK. Locate qdisc */ |
1406 | if ((q = qdisc_lookup(dev, qid)) == NULL) | 1415 | q = qdisc_lookup(dev, qid); |
1416 | if (!q) | ||
1407 | return -ENOENT; | 1417 | return -ENOENT; |
1408 | 1418 | ||
1409 | /* An check that it supports classes */ | 1419 | /* An check that it supports classes */ |
@@ -1423,13 +1433,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1423 | 1433 | ||
1424 | if (cl == 0) { | 1434 | if (cl == 0) { |
1425 | err = -ENOENT; | 1435 | err = -ENOENT; |
1426 | if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) | 1436 | if (n->nlmsg_type != RTM_NEWTCLASS || |
1437 | !(n->nlmsg_flags & NLM_F_CREATE)) | ||
1427 | goto out; | 1438 | goto out; |
1428 | } else { | 1439 | } else { |
1429 | switch (n->nlmsg_type) { | 1440 | switch (n->nlmsg_type) { |
1430 | case RTM_NEWTCLASS: | 1441 | case RTM_NEWTCLASS: |
1431 | err = -EEXIST; | 1442 | err = -EEXIST; |
1432 | if (n->nlmsg_flags&NLM_F_EXCL) | 1443 | if (n->nlmsg_flags & NLM_F_EXCL) |
1433 | goto out; | 1444 | goto out; |
1434 | break; | 1445 | break; |
1435 | case RTM_DELTCLASS: | 1446 | case RTM_DELTCLASS: |
@@ -1521,14 +1532,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb, | |||
1521 | return -EINVAL; | 1532 | return -EINVAL; |
1522 | } | 1533 | } |
1523 | 1534 | ||
1524 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1535 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
1536 | n->nlmsg_flags & NLM_F_ECHO); | ||
1525 | } | 1537 | } |
1526 | 1538 | ||
1527 | struct qdisc_dump_args | 1539 | struct qdisc_dump_args { |
1528 | { | 1540 | struct qdisc_walker w; |
1529 | struct qdisc_walker w; | 1541 | struct sk_buff *skb; |
1530 | struct sk_buff *skb; | 1542 | struct netlink_callback *cb; |
1531 | struct netlink_callback *cb; | ||
1532 | }; | 1543 | }; |
1533 | 1544 | ||
1534 | static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) | 1545 | static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) |
@@ -1590,7 +1601,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, | |||
1590 | 1601 | ||
1591 | static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | 1602 | static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) |
1592 | { | 1603 | { |
1593 | struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); | 1604 | struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); |
1594 | struct net *net = sock_net(skb->sk); | 1605 | struct net *net = sock_net(skb->sk); |
1595 | struct netdev_queue *dev_queue; | 1606 | struct netdev_queue *dev_queue; |
1596 | struct net_device *dev; | 1607 | struct net_device *dev; |
@@ -1598,7 +1609,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1598 | 1609 | ||
1599 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 1610 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
1600 | return 0; | 1611 | return 0; |
1601 | if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | 1612 | dev = dev_get_by_index(net, tcm->tcm_ifindex); |
1613 | if (!dev) | ||
1602 | return 0; | 1614 | return 0; |
1603 | 1615 | ||
1604 | s_t = cb->args[0]; | 1616 | s_t = cb->args[0]; |
@@ -1621,19 +1633,22 @@ done: | |||
1621 | } | 1633 | } |
1622 | 1634 | ||
1623 | /* Main classifier routine: scans classifier chain attached | 1635 | /* Main classifier routine: scans classifier chain attached |
1624 | to this qdisc, (optionally) tests for protocol and asks | 1636 | * to this qdisc, (optionally) tests for protocol and asks |
1625 | specific classifiers. | 1637 | * specific classifiers. |
1626 | */ | 1638 | */ |
1627 | int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, | 1639 | int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, |
1628 | struct tcf_result *res) | 1640 | struct tcf_result *res) |
1629 | { | 1641 | { |
1630 | __be16 protocol = skb->protocol; | 1642 | __be16 protocol = skb->protocol; |
1631 | int err = 0; | 1643 | int err; |
1632 | 1644 | ||
1633 | for (; tp; tp = tp->next) { | 1645 | for (; tp; tp = tp->next) { |
1634 | if ((tp->protocol == protocol || | 1646 | if (tp->protocol != protocol && |
1635 | tp->protocol == htons(ETH_P_ALL)) && | 1647 | tp->protocol != htons(ETH_P_ALL)) |
1636 | (err = tp->classify(skb, tp, res)) >= 0) { | 1648 | continue; |
1649 | err = tp->classify(skb, tp, res); | ||
1650 | |||
1651 | if (err >= 0) { | ||
1637 | #ifdef CONFIG_NET_CLS_ACT | 1652 | #ifdef CONFIG_NET_CLS_ACT |
1638 | if (err != TC_ACT_RECLASSIFY && skb->tc_verd) | 1653 | if (err != TC_ACT_RECLASSIFY && skb->tc_verd) |
1639 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); | 1654 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); |
@@ -1664,11 +1679,11 @@ reclassify: | |||
1664 | 1679 | ||
1665 | if (verd++ >= MAX_REC_LOOP) { | 1680 | if (verd++ >= MAX_REC_LOOP) { |
1666 | if (net_ratelimit()) | 1681 | if (net_ratelimit()) |
1667 | printk(KERN_NOTICE | 1682 | pr_notice("%s: packet reclassify loop" |
1668 | "%s: packet reclassify loop" | ||
1669 | " rule prio %u protocol %02x\n", | 1683 | " rule prio %u protocol %02x\n", |
1670 | tp->q->ops->id, | 1684 | tp->q->ops->id, |
1671 | tp->prio & 0xffff, ntohs(tp->protocol)); | 1685 | tp->prio & 0xffff, |
1686 | ntohs(tp->protocol)); | ||
1672 | return TC_ACT_SHOT; | 1687 | return TC_ACT_SHOT; |
1673 | } | 1688 | } |
1674 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); | 1689 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); |
@@ -1761,7 +1776,7 @@ static int __init pktsched_init(void) | |||
1761 | 1776 | ||
1762 | err = register_pernet_subsys(&psched_net_ops); | 1777 | err = register_pernet_subsys(&psched_net_ops); |
1763 | if (err) { | 1778 | if (err) { |
1764 | printk(KERN_ERR "pktsched_init: " | 1779 | pr_err("pktsched_init: " |
1765 | "cannot initialize per netns operations\n"); | 1780 | "cannot initialize per netns operations\n"); |
1766 | return err; | 1781 | return err; |
1767 | } | 1782 | } |
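The sch_api.c hunks above apply the same two CodingStyle rules over and over: assignments are hoisted out of if () conditions onto their own statement, and binary operators such as & get spaces around them. The following is a minimal, self-contained sketch of that before/after pattern; lookup(), F_EXCL and the surrounding scaffolding are made-up stand-ins for illustration, not net/sched symbols.

/* Illustrative only: hypothetical stand-ins for qdisc_lookup()-style code. */
#include <stdio.h>
#include <stddef.h>

#define F_EXCL 0x1

static int *lookup(int *table, int n, int key)
{
	int i;

	for (i = 0; i < n; i++)
		if (table[i] == key)
			return &table[i];
	return NULL;
}

/* Old style: assignment buried inside the condition, '&' without spaces. */
static int old_style(int *table, int n, int key, int flags)
{
	int *p;

	if ((p = lookup(table, n, key)) == NULL)
		return -1;
	if (flags&F_EXCL)
		return -2;
	return *p;
}

/* New style: the lookup gets its own statement, operators are spaced. */
static int new_style(int *table, int n, int key, int flags)
{
	int *p;

	p = lookup(table, n, key);
	if (!p)
		return -1;
	if (flags & F_EXCL)
		return -2;
	return *p;
}

int main(void)
{
	int t[] = { 3, 5, 8 };

	printf("%d %d\n", old_style(t, 3, 5, 0), new_style(t, 3, 5, 0));
	return 0;
}

The two functions behave identically; the point of the cleanup is readability, not behavior.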
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 943d733409d0..3f08158b8688 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) | |||
319 | * creation), and one for the reference held when calling delete. | 319 | * creation), and one for the reference held when calling delete. |
320 | */ | 320 | */ |
321 | if (flow->ref < 2) { | 321 | if (flow->ref < 2) { |
322 | printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref); | 322 | pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref); |
323 | return -EINVAL; | 323 | return -EINVAL; |
324 | } | 324 | } |
325 | if (flow->ref > 2) | 325 | if (flow->ref > 2) |
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
384 | } | 384 | } |
385 | } | 385 | } |
386 | flow = NULL; | 386 | flow = NULL; |
387 | done: | 387 | done: |
388 | ; | 388 | ; |
389 | } | 389 | } |
390 | if (!flow) | 390 | if (!flow) { |
391 | flow = &p->link; | 391 | flow = &p->link; |
392 | else { | 392 | } else { |
393 | if (flow->vcc) | 393 | if (flow->vcc) |
394 | ATM_SKB(skb)->atm_options = flow->vcc->atm_options; | 394 | ATM_SKB(skb)->atm_options = flow->vcc->atm_options; |
395 | /*@@@ looks good ... but it's not supposed to work :-) */ | 395 | /*@@@ looks good ... but it's not supposed to work :-) */ |
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch) | |||
576 | 576 | ||
577 | list_for_each_entry_safe(flow, tmp, &p->flows, list) { | 577 | list_for_each_entry_safe(flow, tmp, &p->flows, list) { |
578 | if (flow->ref > 1) | 578 | if (flow->ref > 1) |
579 | printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, | 579 | pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref); |
580 | flow->ref); | ||
581 | atm_tc_put(sch, (unsigned long)flow); | 580 | atm_tc_put(sch, (unsigned long)flow); |
582 | } | 581 | } |
583 | tasklet_kill(&p->task); | 582 | tasklet_kill(&p->task); |
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, | |||
616 | } | 615 | } |
617 | if (flow->excess) | 616 | if (flow->excess) |
618 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); | 617 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); |
619 | else { | 618 | else |
620 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); | 619 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); |
621 | } | ||
622 | 620 | ||
623 | nla_nest_end(skb, nest); | 621 | nla_nest_end(skb, nest); |
624 | return skb->len; | 622 | return skb->len; |
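A second recurring conversion, visible in both the sch_api.c and sch_atm.c hunks, is replacing raw printk(KERN_ERR ...) / printk(KERN_NOTICE ...) calls with the pr_err() / pr_notice() helpers from <linux/printk.h>. Below is a small userspace mock of that macro shape, purely to illustrate what the call sites gain; the printf-based definitions are stand-ins and not the kernel's own.

#include <stdio.h>

/* Mock log-level prefix and helper; the real ones live in <linux/printk.h>. */
#define KERN_ERR		"<3>"
#define pr_err(fmt, ...)	printf(KERN_ERR fmt, ##__VA_ARGS__)

int main(void)
{
	int ref = 1;

	/* Before the cleanup: the level prefix is spelled out at each call site. */
	printf(KERN_ERR "atm_tc_delete: flow->ref == %d\n", ref);

	/* After the cleanup: same output, shorter call, and one central place
	 * (the macro) to hook a pr_fmt()-style prefix later. */
	pr_err("atm_tc_delete: flow->ref == %d\n", ref);
	return 0;
}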
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index c80d1c210c5d..4aaf44c95c52 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -72,8 +72,7 @@ | |||
72 | struct cbq_sched_data; | 72 | struct cbq_sched_data; |
73 | 73 | ||
74 | 74 | ||
75 | struct cbq_class | 75 | struct cbq_class { |
76 | { | ||
77 | struct Qdisc_class_common common; | 76 | struct Qdisc_class_common common; |
78 | struct cbq_class *next_alive; /* next class with backlog in this priority band */ | 77 | struct cbq_class *next_alive; /* next class with backlog in this priority band */ |
79 | 78 | ||
@@ -139,19 +138,18 @@ struct cbq_class | |||
139 | int refcnt; | 138 | int refcnt; |
140 | int filters; | 139 | int filters; |
141 | 140 | ||
142 | struct cbq_class *defaults[TC_PRIO_MAX+1]; | 141 | struct cbq_class *defaults[TC_PRIO_MAX + 1]; |
143 | }; | 142 | }; |
144 | 143 | ||
145 | struct cbq_sched_data | 144 | struct cbq_sched_data { |
146 | { | ||
147 | struct Qdisc_class_hash clhash; /* Hash table of all classes */ | 145 | struct Qdisc_class_hash clhash; /* Hash table of all classes */ |
148 | int nclasses[TC_CBQ_MAXPRIO+1]; | 146 | int nclasses[TC_CBQ_MAXPRIO + 1]; |
149 | unsigned quanta[TC_CBQ_MAXPRIO+1]; | 147 | unsigned int quanta[TC_CBQ_MAXPRIO + 1]; |
150 | 148 | ||
151 | struct cbq_class link; | 149 | struct cbq_class link; |
152 | 150 | ||
153 | unsigned activemask; | 151 | unsigned int activemask; |
154 | struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes | 152 | struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes |
155 | with backlog */ | 153 | with backlog */ |
156 | 154 | ||
157 | #ifdef CONFIG_NET_CLS_ACT | 155 | #ifdef CONFIG_NET_CLS_ACT |
@@ -162,7 +160,7 @@ struct cbq_sched_data | |||
162 | int tx_len; | 160 | int tx_len; |
163 | psched_time_t now; /* Cached timestamp */ | 161 | psched_time_t now; /* Cached timestamp */ |
164 | psched_time_t now_rt; /* Cached real time */ | 162 | psched_time_t now_rt; /* Cached real time */ |
165 | unsigned pmask; | 163 | unsigned int pmask; |
166 | 164 | ||
167 | struct hrtimer delay_timer; | 165 | struct hrtimer delay_timer; |
168 | struct qdisc_watchdog watchdog; /* Watchdog timer, | 166 | struct qdisc_watchdog watchdog; /* Watchdog timer, |
@@ -175,9 +173,9 @@ struct cbq_sched_data | |||
175 | }; | 173 | }; |
176 | 174 | ||
177 | 175 | ||
178 | #define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) | 176 | #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) |
179 | 177 | ||
180 | static __inline__ struct cbq_class * | 178 | static inline struct cbq_class * |
181 | cbq_class_lookup(struct cbq_sched_data *q, u32 classid) | 179 | cbq_class_lookup(struct cbq_sched_data *q, u32 classid) |
182 | { | 180 | { |
183 | struct Qdisc_class_common *clc; | 181 | struct Qdisc_class_common *clc; |
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid) | |||
193 | static struct cbq_class * | 191 | static struct cbq_class * |
194 | cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) | 192 | cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) |
195 | { | 193 | { |
196 | struct cbq_class *cl, *new; | 194 | struct cbq_class *cl; |
197 | 195 | ||
198 | for (cl = this->tparent; cl; cl = cl->tparent) | 196 | for (cl = this->tparent; cl; cl = cl->tparent) { |
199 | if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this) | 197 | struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; |
200 | return new; | ||
201 | 198 | ||
199 | if (new != NULL && new != this) | ||
200 | return new; | ||
201 | } | ||
202 | return NULL; | 202 | return NULL; |
203 | } | 203 | } |
204 | 204 | ||
205 | #endif | 205 | #endif |
206 | 206 | ||
207 | /* Classify packet. The procedure is pretty complicated, but | 207 | /* Classify packet. The procedure is pretty complicated, but |
208 | it allows us to combine link sharing and priority scheduling | 208 | * it allows us to combine link sharing and priority scheduling |
209 | transparently. | 209 | * transparently. |
210 | 210 | * | |
211 | Namely, you can put link sharing rules (f.e. route based) at root of CBQ, | 211 | * Namely, you can put link sharing rules (f.e. route based) at root of CBQ, |
212 | so that it resolves to split nodes. Then packets are classified | 212 | * so that it resolves to split nodes. Then packets are classified |
213 | by logical priority, or a more specific classifier may be attached | 213 | * by logical priority, or a more specific classifier may be attached |
214 | to the split node. | 214 | * to the split node. |
215 | */ | 215 | */ |
216 | 216 | ||
217 | static struct cbq_class * | 217 | static struct cbq_class * |
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
227 | /* | 227 | /* |
228 | * Step 1. If skb->priority points to one of our classes, use it. | 228 | * Step 1. If skb->priority points to one of our classes, use it. |
229 | */ | 229 | */ |
230 | if (TC_H_MAJ(prio^sch->handle) == 0 && | 230 | if (TC_H_MAJ(prio ^ sch->handle) == 0 && |
231 | (cl = cbq_class_lookup(q, prio)) != NULL) | 231 | (cl = cbq_class_lookup(q, prio)) != NULL) |
232 | return cl; | 232 | return cl; |
233 | 233 | ||
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
243 | (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) | 243 | (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) |
244 | goto fallback; | 244 | goto fallback; |
245 | 245 | ||
246 | if ((cl = (void*)res.class) == NULL) { | 246 | cl = (void *)res.class; |
247 | if (!cl) { | ||
247 | if (TC_H_MAJ(res.classid)) | 248 | if (TC_H_MAJ(res.classid)) |
248 | cl = cbq_class_lookup(q, res.classid); | 249 | cl = cbq_class_lookup(q, res.classid); |
249 | else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL) | 250 | else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) |
250 | cl = defmap[TC_PRIO_BESTEFFORT]; | 251 | cl = defmap[TC_PRIO_BESTEFFORT]; |
251 | 252 | ||
252 | if (cl == NULL || cl->level >= head->level) | 253 | if (cl == NULL || cl->level >= head->level) |
@@ -282,7 +283,7 @@ fallback: | |||
282 | * Step 4. No success... | 283 | * Step 4. No success... |
283 | */ | 284 | */ |
284 | if (TC_H_MAJ(prio) == 0 && | 285 | if (TC_H_MAJ(prio) == 0 && |
285 | !(cl = head->defaults[prio&TC_PRIO_MAX]) && | 286 | !(cl = head->defaults[prio & TC_PRIO_MAX]) && |
286 | !(cl = head->defaults[TC_PRIO_BESTEFFORT])) | 287 | !(cl = head->defaults[TC_PRIO_BESTEFFORT])) |
287 | return head; | 288 | return head; |
288 | 289 | ||
@@ -290,12 +291,12 @@ fallback: | |||
290 | } | 291 | } |
291 | 292 | ||
292 | /* | 293 | /* |
293 | A packet has just been enqueued on the empty class. | 294 | * A packet has just been enqueued on the empty class. |
294 | cbq_activate_class adds it to the tail of active class list | 295 | * cbq_activate_class adds it to the tail of active class list |
295 | of its priority band. | 296 | * of its priority band. |
296 | */ | 297 | */ |
297 | 298 | ||
298 | static __inline__ void cbq_activate_class(struct cbq_class *cl) | 299 | static inline void cbq_activate_class(struct cbq_class *cl) |
299 | { | 300 | { |
300 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); | 301 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
301 | int prio = cl->cpriority; | 302 | int prio = cl->cpriority; |
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl) | |||
314 | } | 315 | } |
315 | 316 | ||
316 | /* | 317 | /* |
317 | Unlink class from active chain. | 318 | * Unlink class from active chain. |
318 | Note that this same procedure is done directly in cbq_dequeue* | 319 | * Note that this same procedure is done directly in cbq_dequeue* |
319 | during round-robin procedure. | 320 | * during round-robin procedure. |
320 | */ | 321 | */ |
321 | 322 | ||
322 | static void cbq_deactivate_class(struct cbq_class *this) | 323 | static void cbq_deactivate_class(struct cbq_class *this) |
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) | |||
350 | { | 351 | { |
351 | int toplevel = q->toplevel; | 352 | int toplevel = q->toplevel; |
352 | 353 | ||
353 | if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) { | 354 | if (toplevel > cl->level && !(cl->q->flags & TCQ_F_THROTTLED)) { |
354 | psched_time_t now; | 355 | psched_time_t now; |
355 | psched_tdiff_t incr; | 356 | psched_tdiff_t incr; |
356 | 357 | ||
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) | |||
363 | q->toplevel = cl->level; | 364 | q->toplevel = cl->level; |
364 | return; | 365 | return; |
365 | } | 366 | } |
366 | } while ((cl=cl->borrow) != NULL && toplevel > cl->level); | 367 | } while ((cl = cl->borrow) != NULL && toplevel > cl->level); |
367 | } | 368 | } |
368 | } | 369 | } |
369 | 370 | ||
@@ -418,11 +419,11 @@ static void cbq_ovl_classic(struct cbq_class *cl) | |||
418 | delay += cl->offtime; | 419 | delay += cl->offtime; |
419 | 420 | ||
420 | /* | 421 | /* |
421 | Class goes to sleep, so that it will have no | 422 | * Class goes to sleep, so that it will have no |
422 | chance to work avgidle. Let's forgive it 8) | 423 | * chance to work avgidle. Let's forgive it 8) |
423 | 424 | * | |
424 | BTW cbq-2.0 has a crap in this | 425 | * BTW cbq-2.0 has a crap in this |
425 | place, apparently they forgot to shift it by cl->ewma_log. | 426 | * place, apparently they forgot to shift it by cl->ewma_log. |
426 | */ | 427 | */ |
427 | if (cl->avgidle < 0) | 428 | if (cl->avgidle < 0) |
428 | delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); | 429 | delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); |
@@ -439,8 +440,8 @@ static void cbq_ovl_classic(struct cbq_class *cl) | |||
439 | q->wd_expires = delay; | 440 | q->wd_expires = delay; |
440 | 441 | ||
441 | /* Dirty work! We must schedule wakeups based on | 442 | /* Dirty work! We must schedule wakeups based on |
442 | real available rate, rather than leaf rate, | 443 | * real available rate, rather than leaf rate, |
443 | which may be tiny (even zero). | 444 | * which may be tiny (even zero). |
444 | */ | 445 | */ |
445 | if (q->toplevel == TC_CBQ_MAXLEVEL) { | 446 | if (q->toplevel == TC_CBQ_MAXLEVEL) { |
446 | struct cbq_class *b; | 447 | struct cbq_class *b; |
@@ -460,7 +461,7 @@ static void cbq_ovl_classic(struct cbq_class *cl) | |||
460 | } | 461 | } |
461 | 462 | ||
462 | /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when | 463 | /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when |
463 | they go overlimit | 464 | * they go overlimit |
464 | */ | 465 | */ |
465 | 466 | ||
466 | static void cbq_ovl_rclassic(struct cbq_class *cl) | 467 | static void cbq_ovl_rclassic(struct cbq_class *cl) |
@@ -595,7 +596,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) | |||
595 | struct Qdisc *sch = q->watchdog.qdisc; | 596 | struct Qdisc *sch = q->watchdog.qdisc; |
596 | psched_time_t now; | 597 | psched_time_t now; |
597 | psched_tdiff_t delay = 0; | 598 | psched_tdiff_t delay = 0; |
598 | unsigned pmask; | 599 | unsigned int pmask; |
599 | 600 | ||
600 | now = psched_get_time(); | 601 | now = psched_get_time(); |
601 | 602 | ||
@@ -665,15 +666,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) | |||
665 | #endif | 666 | #endif |
666 | 667 | ||
667 | /* | 668 | /* |
668 | It is mission critical procedure. | 669 | * It is mission critical procedure. |
669 | 670 | * | |
670 | We "regenerate" toplevel cutoff, if transmitting class | 671 | * We "regenerate" toplevel cutoff, if transmitting class |
671 | has backlog and it is not regulated. It is not part of | 672 | * has backlog and it is not regulated. It is not part of |
672 | original CBQ description, but looks more reasonable. | 673 | * original CBQ description, but looks more reasonable. |
673 | Probably, it is wrong. This question needs further investigation. | 674 | * Probably, it is wrong. This question needs further investigation. |
674 | */ | 675 | */ |
675 | 676 | ||
676 | static __inline__ void | 677 | static inline void |
677 | cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, | 678 | cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, |
678 | struct cbq_class *borrowed) | 679 | struct cbq_class *borrowed) |
679 | { | 680 | { |
@@ -684,7 +685,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, | |||
684 | q->toplevel = borrowed->level; | 685 | q->toplevel = borrowed->level; |
685 | return; | 686 | return; |
686 | } | 687 | } |
687 | } while ((borrowed=borrowed->borrow) != NULL); | 688 | } while ((borrowed = borrowed->borrow) != NULL); |
688 | } | 689 | } |
689 | #if 0 | 690 | #if 0 |
690 | /* It is not necessary now. Uncommenting it | 691 | /* It is not necessary now. Uncommenting it |
@@ -712,10 +713,10 @@ cbq_update(struct cbq_sched_data *q) | |||
712 | cl->bstats.bytes += len; | 713 | cl->bstats.bytes += len; |
713 | 714 | ||
714 | /* | 715 | /* |
715 | (now - last) is total time between packet right edges. | 716 | * (now - last) is total time between packet right edges. |
716 | (last_pktlen/rate) is "virtual" busy time, so that | 717 | * (last_pktlen/rate) is "virtual" busy time, so that |
717 | 718 | * | |
718 | idle = (now - last) - last_pktlen/rate | 719 | * idle = (now - last) - last_pktlen/rate |
719 | */ | 720 | */ |
720 | 721 | ||
721 | idle = q->now - cl->last; | 722 | idle = q->now - cl->last; |
@@ -725,9 +726,9 @@ cbq_update(struct cbq_sched_data *q) | |||
725 | idle -= L2T(cl, len); | 726 | idle -= L2T(cl, len); |
726 | 727 | ||
727 | /* true_avgidle := (1-W)*true_avgidle + W*idle, | 728 | /* true_avgidle := (1-W)*true_avgidle + W*idle, |
728 | where W=2^{-ewma_log}. But cl->avgidle is scaled: | 729 | * where W=2^{-ewma_log}. But cl->avgidle is scaled: |
729 | cl->avgidle == true_avgidle/W, | 730 | * cl->avgidle == true_avgidle/W, |
730 | hence: | 731 | * hence: |
731 | */ | 732 | */ |
732 | avgidle += idle - (avgidle>>cl->ewma_log); | 733 | avgidle += idle - (avgidle>>cl->ewma_log); |
733 | } | 734 | } |
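The comment block just reflowed above states the EWMA in unscaled form, true_avgidle := (1 - W) * true_avgidle + W * idle with W = 2^-ewma_log, while the code keeps the scaled value cl->avgidle == true_avgidle / W. Substituting true_avgidle = W * avgidle turns the update into avgidle' = avgidle + idle - (avgidle >> ewma_log), which is exactly the statement in the hunk. A self-contained numeric check of that identity (the values below are arbitrary, not CBQ defaults):

#include <stdio.h>

int main(void)
{
	int ewma_log = 3;		/* W = 2^-3 = 1/8 */
	long avgidle = 1000;		/* scaled: avgidle == true_avgidle / W */
	long idle = 200;

	/* Unscaled update in floating point, for comparison:
	 * true' = (1 - W) * true + W * idle, with true = avgidle * W. */
	double W = 1.0 / (1 << ewma_log);
	double true_next = (1.0 - W) * (avgidle * W) + W * idle;

	/* Scaled update, exactly as written in cbq_update(). */
	avgidle += idle - (avgidle >> ewma_log);

	/* Prints "scaled result: 1075, unscaled/W: 1075.0". */
	printf("scaled result: %ld, unscaled/W: %.1f\n", avgidle, true_next / W);
	return 0;
}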
@@ -741,22 +742,22 @@ cbq_update(struct cbq_sched_data *q) | |||
741 | cl->avgidle = avgidle; | 742 | cl->avgidle = avgidle; |
742 | 743 | ||
743 | /* Calculate expected time, when this class | 744 | /* Calculate expected time, when this class |
744 | will be allowed to send. | 745 | * will be allowed to send. |
745 | It will occur, when: | 746 | * It will occur, when: |
746 | (1-W)*true_avgidle + W*delay = 0, i.e. | 747 | * (1-W)*true_avgidle + W*delay = 0, i.e. |
747 | idle = (1/W - 1)*(-true_avgidle) | 748 | * idle = (1/W - 1)*(-true_avgidle) |
748 | or | 749 | * or |
749 | idle = (1 - W)*(-cl->avgidle); | 750 | * idle = (1 - W)*(-cl->avgidle); |
750 | */ | 751 | */ |
751 | idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); | 752 | idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); |
752 | 753 | ||
753 | /* | 754 | /* |
754 | That is not all. | 755 | * That is not all. |
755 | To maintain the rate allocated to the class, | 756 | * To maintain the rate allocated to the class, |
756 | we add to undertime virtual clock, | 757 | * we add to undertime virtual clock, |
757 | necessary to complete transmitted packet. | 758 | * necessary to complete transmitted packet. |
758 | (len/phys_bandwidth has been already passed | 759 | * (len/phys_bandwidth has been already passed |
759 | to the moment of cbq_update) | 760 | * to the moment of cbq_update) |
760 | */ | 761 | */ |
761 | 762 | ||
762 | idle -= L2T(&q->link, len); | 763 | idle -= L2T(&q->link, len); |
@@ -778,7 +779,7 @@ cbq_update(struct cbq_sched_data *q) | |||
778 | cbq_update_toplevel(q, this, q->tx_borrowed); | 779 | cbq_update_toplevel(q, this, q->tx_borrowed); |
779 | } | 780 | } |
780 | 781 | ||
781 | static __inline__ struct cbq_class * | 782 | static inline struct cbq_class * |
782 | cbq_under_limit(struct cbq_class *cl) | 783 | cbq_under_limit(struct cbq_class *cl) |
783 | { | 784 | { |
784 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); | 785 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
@@ -794,16 +795,17 @@ cbq_under_limit(struct cbq_class *cl) | |||
794 | 795 | ||
795 | do { | 796 | do { |
796 | /* It is very suspicious place. Now overlimit | 797 | /* It is very suspicious place. Now overlimit |
797 | action is generated for not bounded classes | 798 | * action is generated for not bounded classes |
798 | only if link is completely congested. | 799 | * only if link is completely congested. |
799 | Though it is in agree with ancestor-only paradigm, | 800 | * Though it is in agree with ancestor-only paradigm, |
800 | it looks very stupid. Particularly, | 801 | * it looks very stupid. Particularly, |
801 | it means that this chunk of code will either | 802 | * it means that this chunk of code will either |
802 | never be called or result in strong amplification | 803 | * never be called or result in strong amplification |
803 | of burstiness. Dangerous, silly, and, however, | 804 | * of burstiness. Dangerous, silly, and, however, |
804 | no another solution exists. | 805 | * no another solution exists. |
805 | */ | 806 | */ |
806 | if ((cl = cl->borrow) == NULL) { | 807 | cl = cl->borrow; |
808 | if (!cl) { | ||
807 | this_cl->qstats.overlimits++; | 809 | this_cl->qstats.overlimits++; |
808 | this_cl->overlimit(this_cl); | 810 | this_cl->overlimit(this_cl); |
809 | return NULL; | 811 | return NULL; |
@@ -816,7 +818,7 @@ cbq_under_limit(struct cbq_class *cl) | |||
816 | return cl; | 818 | return cl; |
817 | } | 819 | } |
818 | 820 | ||
819 | static __inline__ struct sk_buff * | 821 | static inline struct sk_buff * |
820 | cbq_dequeue_prio(struct Qdisc *sch, int prio) | 822 | cbq_dequeue_prio(struct Qdisc *sch, int prio) |
821 | { | 823 | { |
822 | struct cbq_sched_data *q = qdisc_priv(sch); | 824 | struct cbq_sched_data *q = qdisc_priv(sch); |
@@ -840,7 +842,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) | |||
840 | 842 | ||
841 | if (cl->deficit <= 0) { | 843 | if (cl->deficit <= 0) { |
842 | /* Class exhausted its allotment per | 844 | /* Class exhausted its allotment per |
843 | this round. Switch to the next one. | 845 | * this round. Switch to the next one. |
844 | */ | 846 | */ |
845 | deficit = 1; | 847 | deficit = 1; |
846 | cl->deficit += cl->quantum; | 848 | cl->deficit += cl->quantum; |
@@ -850,8 +852,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) | |||
850 | skb = cl->q->dequeue(cl->q); | 852 | skb = cl->q->dequeue(cl->q); |
851 | 853 | ||
852 | /* Class did not give us any skb :-( | 854 | /* Class did not give us any skb :-( |
853 | It could occur even if cl->q->q.qlen != 0 | 855 | * It could occur even if cl->q->q.qlen != 0 |
854 | f.e. if cl->q == "tbf" | 856 | * f.e. if cl->q == "tbf" |
855 | */ | 857 | */ |
856 | if (skb == NULL) | 858 | if (skb == NULL) |
857 | goto skip_class; | 859 | goto skip_class; |
@@ -880,7 +882,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) | |||
880 | skip_class: | 882 | skip_class: |
881 | if (cl->q->q.qlen == 0 || prio != cl->cpriority) { | 883 | if (cl->q->q.qlen == 0 || prio != cl->cpriority) { |
882 | /* Class is empty or penalized. | 884 | /* Class is empty or penalized. |
883 | Unlink it from active chain. | 885 | * Unlink it from active chain. |
884 | */ | 886 | */ |
885 | cl_prev->next_alive = cl->next_alive; | 887 | cl_prev->next_alive = cl->next_alive; |
886 | cl->next_alive = NULL; | 888 | cl->next_alive = NULL; |
@@ -919,14 +921,14 @@ next_class: | |||
919 | return NULL; | 921 | return NULL; |
920 | } | 922 | } |
921 | 923 | ||
922 | static __inline__ struct sk_buff * | 924 | static inline struct sk_buff * |
923 | cbq_dequeue_1(struct Qdisc *sch) | 925 | cbq_dequeue_1(struct Qdisc *sch) |
924 | { | 926 | { |
925 | struct cbq_sched_data *q = qdisc_priv(sch); | 927 | struct cbq_sched_data *q = qdisc_priv(sch); |
926 | struct sk_buff *skb; | 928 | struct sk_buff *skb; |
927 | unsigned activemask; | 929 | unsigned int activemask; |
928 | 930 | ||
929 | activemask = q->activemask&0xFF; | 931 | activemask = q->activemask & 0xFF; |
930 | while (activemask) { | 932 | while (activemask) { |
931 | int prio = ffz(~activemask); | 933 | int prio = ffz(~activemask); |
932 | activemask &= ~(1<<prio); | 934 | activemask &= ~(1<<prio); |
@@ -951,11 +953,11 @@ cbq_dequeue(struct Qdisc *sch) | |||
951 | if (q->tx_class) { | 953 | if (q->tx_class) { |
952 | psched_tdiff_t incr2; | 954 | psched_tdiff_t incr2; |
953 | /* Time integrator. We calculate EOS time | 955 | /* Time integrator. We calculate EOS time |
954 | by adding expected packet transmission time. | 956 | * by adding expected packet transmission time. |
955 | If real time is greater, we warp artificial clock, | 957 | * If real time is greater, we warp artificial clock, |
956 | so that: | 958 | * so that: |
957 | 959 | * | |
958 | cbq_time = max(real_time, work); | 960 | * cbq_time = max(real_time, work); |
959 | */ | 961 | */ |
960 | incr2 = L2T(&q->link, q->tx_len); | 962 | incr2 = L2T(&q->link, q->tx_len); |
961 | q->now += incr2; | 963 | q->now += incr2; |
@@ -977,22 +979,22 @@ cbq_dequeue(struct Qdisc *sch) | |||
977 | } | 979 | } |
978 | 980 | ||
979 | /* All the classes are overlimit. | 981 | /* All the classes are overlimit. |
980 | 982 | * | |
981 | It is possible, if: | 983 | * It is possible, if: |
982 | 984 | * | |
983 | 1. Scheduler is empty. | 985 | * 1. Scheduler is empty. |
984 | 2. Toplevel cutoff inhibited borrowing. | 986 | * 2. Toplevel cutoff inhibited borrowing. |
985 | 3. Root class is overlimit. | 987 | * 3. Root class is overlimit. |
986 | 988 | * | |
987 | Reset 2d and 3d conditions and retry. | 989 | * Reset 2d and 3d conditions and retry. |
988 | 990 | * | |
989 | Note, that NS and cbq-2.0 are buggy, peeking | 991 | * Note, that NS and cbq-2.0 are buggy, peeking |
990 | an arbitrary class is appropriate for ancestor-only | 992 | * an arbitrary class is appropriate for ancestor-only |
991 | sharing, but not for toplevel algorithm. | 993 | * sharing, but not for toplevel algorithm. |
992 | 994 | * | |
993 | Our version is better, but slower, because it requires | 995 | * Our version is better, but slower, because it requires |
994 | two passes, but it is unavoidable with top-level sharing. | 996 | * two passes, but it is unavoidable with top-level sharing. |
995 | */ | 997 | */ |
996 | 998 | ||
997 | if (q->toplevel == TC_CBQ_MAXLEVEL && | 999 | if (q->toplevel == TC_CBQ_MAXLEVEL && |
998 | q->link.undertime == PSCHED_PASTPERFECT) | 1000 | q->link.undertime == PSCHED_PASTPERFECT) |
@@ -1003,7 +1005,8 @@ cbq_dequeue(struct Qdisc *sch) | |||
1003 | } | 1005 | } |
1004 | 1006 | ||
1005 | /* No packets in scheduler or nobody wants to give them to us :-( | 1007 | /* No packets in scheduler or nobody wants to give them to us :-( |
1006 | Sigh... start watchdog timer in the last case. */ | 1008 | * Sigh... start watchdog timer in the last case. |
1009 | */ | ||
1007 | 1010 | ||
1008 | if (sch->q.qlen) { | 1011 | if (sch->q.qlen) { |
1009 | sch->qstats.overlimits++; | 1012 | sch->qstats.overlimits++; |
@@ -1025,13 +1028,14 @@ static void cbq_adjust_levels(struct cbq_class *this) | |||
1025 | int level = 0; | 1028 | int level = 0; |
1026 | struct cbq_class *cl; | 1029 | struct cbq_class *cl; |
1027 | 1030 | ||
1028 | if ((cl = this->children) != NULL) { | 1031 | cl = this->children; |
1032 | if (cl) { | ||
1029 | do { | 1033 | do { |
1030 | if (cl->level > level) | 1034 | if (cl->level > level) |
1031 | level = cl->level; | 1035 | level = cl->level; |
1032 | } while ((cl = cl->sibling) != this->children); | 1036 | } while ((cl = cl->sibling) != this->children); |
1033 | } | 1037 | } |
1034 | this->level = level+1; | 1038 | this->level = level + 1; |
1035 | } while ((this = this->tparent) != NULL); | 1039 | } while ((this = this->tparent) != NULL); |
1036 | } | 1040 | } |
1037 | 1041 | ||
@@ -1047,14 +1051,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) | |||
1047 | for (h = 0; h < q->clhash.hashsize; h++) { | 1051 | for (h = 0; h < q->clhash.hashsize; h++) { |
1048 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { | 1052 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1049 | /* BUGGGG... Beware! This expression suffer of | 1053 | /* BUGGGG... Beware! This expression suffer of |
1050 | arithmetic overflows! | 1054 | * arithmetic overflows! |
1051 | */ | 1055 | */ |
1052 | if (cl->priority == prio) { | 1056 | if (cl->priority == prio) { |
1053 | cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ | 1057 | cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ |
1054 | q->quanta[prio]; | 1058 | q->quanta[prio]; |
1055 | } | 1059 | } |
1056 | if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { | 1060 | if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { |
1057 | printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum); | 1061 | pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n", |
1062 | cl->common.classid, cl->quantum); | ||
1058 | cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; | 1063 | cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; |
1059 | } | 1064 | } |
1060 | } | 1065 | } |
@@ -1065,18 +1070,18 @@ static void cbq_sync_defmap(struct cbq_class *cl) | |||
1065 | { | 1070 | { |
1066 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); | 1071 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
1067 | struct cbq_class *split = cl->split; | 1072 | struct cbq_class *split = cl->split; |
1068 | unsigned h; | 1073 | unsigned int h; |
1069 | int i; | 1074 | int i; |
1070 | 1075 | ||
1071 | if (split == NULL) | 1076 | if (split == NULL) |
1072 | return; | 1077 | return; |
1073 | 1078 | ||
1074 | for (i=0; i<=TC_PRIO_MAX; i++) { | 1079 | for (i = 0; i <= TC_PRIO_MAX; i++) { |
1075 | if (split->defaults[i] == cl && !(cl->defmap&(1<<i))) | 1080 | if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) |
1076 | split->defaults[i] = NULL; | 1081 | split->defaults[i] = NULL; |
1077 | } | 1082 | } |
1078 | 1083 | ||
1079 | for (i=0; i<=TC_PRIO_MAX; i++) { | 1084 | for (i = 0; i <= TC_PRIO_MAX; i++) { |
1080 | int level = split->level; | 1085 | int level = split->level; |
1081 | 1086 | ||
1082 | if (split->defaults[i]) | 1087 | if (split->defaults[i]) |
@@ -1089,7 +1094,7 @@ static void cbq_sync_defmap(struct cbq_class *cl) | |||
1089 | hlist_for_each_entry(c, n, &q->clhash.hash[h], | 1094 | hlist_for_each_entry(c, n, &q->clhash.hash[h], |
1090 | common.hnode) { | 1095 | common.hnode) { |
1091 | if (c->split == split && c->level < level && | 1096 | if (c->split == split && c->level < level && |
1092 | c->defmap&(1<<i)) { | 1097 | c->defmap & (1<<i)) { |
1093 | split->defaults[i] = c; | 1098 | split->defaults[i] = c; |
1094 | level = c->level; | 1099 | level = c->level; |
1095 | } | 1100 | } |
@@ -1103,7 +1108,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma | |||
1103 | struct cbq_class *split = NULL; | 1108 | struct cbq_class *split = NULL; |
1104 | 1109 | ||
1105 | if (splitid == 0) { | 1110 | if (splitid == 0) { |
1106 | if ((split = cl->split) == NULL) | 1111 | split = cl->split; |
1112 | if (!split) | ||
1107 | return; | 1113 | return; |
1108 | splitid = split->common.classid; | 1114 | splitid = split->common.classid; |
1109 | } | 1115 | } |
@@ -1121,9 +1127,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma | |||
1121 | cl->defmap = 0; | 1127 | cl->defmap = 0; |
1122 | cbq_sync_defmap(cl); | 1128 | cbq_sync_defmap(cl); |
1123 | cl->split = split; | 1129 | cl->split = split; |
1124 | cl->defmap = def&mask; | 1130 | cl->defmap = def & mask; |
1125 | } else | 1131 | } else |
1126 | cl->defmap = (cl->defmap&~mask)|(def&mask); | 1132 | cl->defmap = (cl->defmap & ~mask) | (def & mask); |
1127 | 1133 | ||
1128 | cbq_sync_defmap(cl); | 1134 | cbq_sync_defmap(cl); |
1129 | } | 1135 | } |
@@ -1136,7 +1142,7 @@ static void cbq_unlink_class(struct cbq_class *this) | |||
1136 | qdisc_class_hash_remove(&q->clhash, &this->common); | 1142 | qdisc_class_hash_remove(&q->clhash, &this->common); |
1137 | 1143 | ||
1138 | if (this->tparent) { | 1144 | if (this->tparent) { |
1139 | clp=&this->sibling; | 1145 | clp = &this->sibling; |
1140 | cl = *clp; | 1146 | cl = *clp; |
1141 | do { | 1147 | do { |
1142 | if (cl == this) { | 1148 | if (cl == this) { |
@@ -1175,7 +1181,7 @@ static void cbq_link_class(struct cbq_class *this) | |||
1175 | } | 1181 | } |
1176 | } | 1182 | } |
1177 | 1183 | ||
1178 | static unsigned int cbq_drop(struct Qdisc* sch) | 1184 | static unsigned int cbq_drop(struct Qdisc *sch) |
1179 | { | 1185 | { |
1180 | struct cbq_sched_data *q = qdisc_priv(sch); | 1186 | struct cbq_sched_data *q = qdisc_priv(sch); |
1181 | struct cbq_class *cl, *cl_head; | 1187 | struct cbq_class *cl, *cl_head; |
@@ -1183,7 +1189,8 @@ static unsigned int cbq_drop(struct Qdisc* sch) | |||
1183 | unsigned int len; | 1189 | unsigned int len; |
1184 | 1190 | ||
1185 | for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { | 1191 | for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { |
1186 | if ((cl_head = q->active[prio]) == NULL) | 1192 | cl_head = q->active[prio]; |
1193 | if (!cl_head) | ||
1187 | continue; | 1194 | continue; |
1188 | 1195 | ||
1189 | cl = cl_head; | 1196 | cl = cl_head; |
@@ -1200,13 +1207,13 @@ static unsigned int cbq_drop(struct Qdisc* sch) | |||
1200 | } | 1207 | } |
1201 | 1208 | ||
1202 | static void | 1209 | static void |
1203 | cbq_reset(struct Qdisc* sch) | 1210 | cbq_reset(struct Qdisc *sch) |
1204 | { | 1211 | { |
1205 | struct cbq_sched_data *q = qdisc_priv(sch); | 1212 | struct cbq_sched_data *q = qdisc_priv(sch); |
1206 | struct cbq_class *cl; | 1213 | struct cbq_class *cl; |
1207 | struct hlist_node *n; | 1214 | struct hlist_node *n; |
1208 | int prio; | 1215 | int prio; |
1209 | unsigned h; | 1216 | unsigned int h; |
1210 | 1217 | ||
1211 | q->activemask = 0; | 1218 | q->activemask = 0; |
1212 | q->pmask = 0; | 1219 | q->pmask = 0; |
@@ -1238,21 +1245,21 @@ cbq_reset(struct Qdisc* sch) | |||
1238 | 1245 | ||
1239 | static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) | 1246 | static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) |
1240 | { | 1247 | { |
1241 | if (lss->change&TCF_CBQ_LSS_FLAGS) { | 1248 | if (lss->change & TCF_CBQ_LSS_FLAGS) { |
1242 | cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; | 1249 | cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; |
1243 | cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; | 1250 | cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; |
1244 | } | 1251 | } |
1245 | if (lss->change&TCF_CBQ_LSS_EWMA) | 1252 | if (lss->change & TCF_CBQ_LSS_EWMA) |
1246 | cl->ewma_log = lss->ewma_log; | 1253 | cl->ewma_log = lss->ewma_log; |
1247 | if (lss->change&TCF_CBQ_LSS_AVPKT) | 1254 | if (lss->change & TCF_CBQ_LSS_AVPKT) |
1248 | cl->avpkt = lss->avpkt; | 1255 | cl->avpkt = lss->avpkt; |
1249 | if (lss->change&TCF_CBQ_LSS_MINIDLE) | 1256 | if (lss->change & TCF_CBQ_LSS_MINIDLE) |
1250 | cl->minidle = -(long)lss->minidle; | 1257 | cl->minidle = -(long)lss->minidle; |
1251 | if (lss->change&TCF_CBQ_LSS_MAXIDLE) { | 1258 | if (lss->change & TCF_CBQ_LSS_MAXIDLE) { |
1252 | cl->maxidle = lss->maxidle; | 1259 | cl->maxidle = lss->maxidle; |
1253 | cl->avgidle = lss->maxidle; | 1260 | cl->avgidle = lss->maxidle; |
1254 | } | 1261 | } |
1255 | if (lss->change&TCF_CBQ_LSS_OFFTIME) | 1262 | if (lss->change & TCF_CBQ_LSS_OFFTIME) |
1256 | cl->offtime = lss->offtime; | 1263 | cl->offtime = lss->offtime; |
1257 | return 0; | 1264 | return 0; |
1258 | } | 1265 | } |
@@ -1280,10 +1287,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) | |||
1280 | if (wrr->weight) | 1287 | if (wrr->weight) |
1281 | cl->weight = wrr->weight; | 1288 | cl->weight = wrr->weight; |
1282 | if (wrr->priority) { | 1289 | if (wrr->priority) { |
1283 | cl->priority = wrr->priority-1; | 1290 | cl->priority = wrr->priority - 1; |
1284 | cl->cpriority = cl->priority; | 1291 | cl->cpriority = cl->priority; |
1285 | if (cl->priority >= cl->priority2) | 1292 | if (cl->priority >= cl->priority2) |
1286 | cl->priority2 = TC_CBQ_MAXPRIO-1; | 1293 | cl->priority2 = TC_CBQ_MAXPRIO - 1; |
1287 | } | 1294 | } |
1288 | 1295 | ||
1289 | cbq_addprio(q, cl); | 1296 | cbq_addprio(q, cl); |
@@ -1300,10 +1307,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl) | |||
1300 | cl->overlimit = cbq_ovl_delay; | 1307 | cl->overlimit = cbq_ovl_delay; |
1301 | break; | 1308 | break; |
1302 | case TC_CBQ_OVL_LOWPRIO: | 1309 | case TC_CBQ_OVL_LOWPRIO: |
1303 | if (ovl->priority2-1 >= TC_CBQ_MAXPRIO || | 1310 | if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO || |
1304 | ovl->priority2-1 <= cl->priority) | 1311 | ovl->priority2 - 1 <= cl->priority) |
1305 | return -EINVAL; | 1312 | return -EINVAL; |
1306 | cl->priority2 = ovl->priority2-1; | 1313 | cl->priority2 = ovl->priority2 - 1; |
1307 | cl->overlimit = cbq_ovl_lowprio; | 1314 | cl->overlimit = cbq_ovl_lowprio; |
1308 | break; | 1315 | break; |
1309 | case TC_CBQ_OVL_DROP: | 1316 | case TC_CBQ_OVL_DROP: |
@@ -1382,9 +1389,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1382 | if (!q->link.q) | 1389 | if (!q->link.q) |
1383 | q->link.q = &noop_qdisc; | 1390 | q->link.q = &noop_qdisc; |
1384 | 1391 | ||
1385 | q->link.priority = TC_CBQ_MAXPRIO-1; | 1392 | q->link.priority = TC_CBQ_MAXPRIO - 1; |
1386 | q->link.priority2 = TC_CBQ_MAXPRIO-1; | 1393 | q->link.priority2 = TC_CBQ_MAXPRIO - 1; |
1387 | q->link.cpriority = TC_CBQ_MAXPRIO-1; | 1394 | q->link.cpriority = TC_CBQ_MAXPRIO - 1; |
1388 | q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; | 1395 | q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; |
1389 | q->link.overlimit = cbq_ovl_classic; | 1396 | q->link.overlimit = cbq_ovl_classic; |
1390 | q->link.allot = psched_mtu(qdisc_dev(sch)); | 1397 | q->link.allot = psched_mtu(qdisc_dev(sch)); |
@@ -1415,7 +1422,7 @@ put_rtab: | |||
1415 | return err; | 1422 | return err; |
1416 | } | 1423 | } |
1417 | 1424 | ||
1418 | static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) | 1425 | static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) |
1419 | { | 1426 | { |
1420 | unsigned char *b = skb_tail_pointer(skb); | 1427 | unsigned char *b = skb_tail_pointer(skb); |
1421 | 1428 | ||
@@ -1427,7 +1434,7 @@ nla_put_failure: | |||
1427 | return -1; | 1434 | return -1; |
1428 | } | 1435 | } |
1429 | 1436 | ||
1430 | static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) | 1437 | static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) |
1431 | { | 1438 | { |
1432 | unsigned char *b = skb_tail_pointer(skb); | 1439 | unsigned char *b = skb_tail_pointer(skb); |
1433 | struct tc_cbq_lssopt opt; | 1440 | struct tc_cbq_lssopt opt; |
@@ -1452,15 +1459,15 @@ nla_put_failure: | |||
1452 | return -1; | 1459 | return -1; |
1453 | } | 1460 | } |
1454 | 1461 | ||
1455 | static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) | 1462 | static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) |
1456 | { | 1463 | { |
1457 | unsigned char *b = skb_tail_pointer(skb); | 1464 | unsigned char *b = skb_tail_pointer(skb); |
1458 | struct tc_cbq_wrropt opt; | 1465 | struct tc_cbq_wrropt opt; |
1459 | 1466 | ||
1460 | opt.flags = 0; | 1467 | opt.flags = 0; |
1461 | opt.allot = cl->allot; | 1468 | opt.allot = cl->allot; |
1462 | opt.priority = cl->priority+1; | 1469 | opt.priority = cl->priority + 1; |
1463 | opt.cpriority = cl->cpriority+1; | 1470 | opt.cpriority = cl->cpriority + 1; |
1464 | opt.weight = cl->weight; | 1471 | opt.weight = cl->weight; |
1465 | NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); | 1472 | NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); |
1466 | return skb->len; | 1473 | return skb->len; |
@@ -1470,13 +1477,13 @@ nla_put_failure: | |||
1470 | return -1; | 1477 | return -1; |
1471 | } | 1478 | } |
1472 | 1479 | ||
1473 | static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) | 1480 | static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) |
1474 | { | 1481 | { |
1475 | unsigned char *b = skb_tail_pointer(skb); | 1482 | unsigned char *b = skb_tail_pointer(skb); |
1476 | struct tc_cbq_ovl opt; | 1483 | struct tc_cbq_ovl opt; |
1477 | 1484 | ||
1478 | opt.strategy = cl->ovl_strategy; | 1485 | opt.strategy = cl->ovl_strategy; |
1479 | opt.priority2 = cl->priority2+1; | 1486 | opt.priority2 = cl->priority2 + 1; |
1480 | opt.pad = 0; | 1487 | opt.pad = 0; |
1481 | opt.penalty = cl->penalty; | 1488 | opt.penalty = cl->penalty; |
1482 | NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); | 1489 | NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); |
@@ -1487,7 +1494,7 @@ nla_put_failure: | |||
1487 | return -1; | 1494 | return -1; |
1488 | } | 1495 | } |
1489 | 1496 | ||
1490 | static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) | 1497 | static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) |
1491 | { | 1498 | { |
1492 | unsigned char *b = skb_tail_pointer(skb); | 1499 | unsigned char *b = skb_tail_pointer(skb); |
1493 | struct tc_cbq_fopt opt; | 1500 | struct tc_cbq_fopt opt; |
@@ -1506,7 +1513,7 @@ nla_put_failure: | |||
1506 | } | 1513 | } |
1507 | 1514 | ||
1508 | #ifdef CONFIG_NET_CLS_ACT | 1515 | #ifdef CONFIG_NET_CLS_ACT |
1509 | static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) | 1516 | static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) |
1510 | { | 1517 | { |
1511 | unsigned char *b = skb_tail_pointer(skb); | 1518 | unsigned char *b = skb_tail_pointer(skb); |
1512 | struct tc_cbq_police opt; | 1519 | struct tc_cbq_police opt; |
@@ -1570,7 +1577,7 @@ static int | |||
1570 | cbq_dump_class(struct Qdisc *sch, unsigned long arg, | 1577 | cbq_dump_class(struct Qdisc *sch, unsigned long arg, |
1571 | struct sk_buff *skb, struct tcmsg *tcm) | 1578 | struct sk_buff *skb, struct tcmsg *tcm) |
1572 | { | 1579 | { |
1573 | struct cbq_class *cl = (struct cbq_class*)arg; | 1580 | struct cbq_class *cl = (struct cbq_class *)arg; |
1574 | struct nlattr *nest; | 1581 | struct nlattr *nest; |
1575 | 1582 | ||
1576 | if (cl->tparent) | 1583 | if (cl->tparent) |
@@ -1598,7 +1605,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1598 | struct gnet_dump *d) | 1605 | struct gnet_dump *d) |
1599 | { | 1606 | { |
1600 | struct cbq_sched_data *q = qdisc_priv(sch); | 1607 | struct cbq_sched_data *q = qdisc_priv(sch); |
1601 | struct cbq_class *cl = (struct cbq_class*)arg; | 1608 | struct cbq_class *cl = (struct cbq_class *)arg; |
1602 | 1609 | ||
1603 | cl->qstats.qlen = cl->q->q.qlen; | 1610 | cl->qstats.qlen = cl->q->q.qlen; |
1604 | cl->xstats.avgidle = cl->avgidle; | 1611 | cl->xstats.avgidle = cl->avgidle; |
@@ -1618,7 +1625,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1618 | static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | 1625 | static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, |
1619 | struct Qdisc **old) | 1626 | struct Qdisc **old) |
1620 | { | 1627 | { |
1621 | struct cbq_class *cl = (struct cbq_class*)arg; | 1628 | struct cbq_class *cl = (struct cbq_class *)arg; |
1622 | 1629 | ||
1623 | if (new == NULL) { | 1630 | if (new == NULL) { |
1624 | new = qdisc_create_dflt(sch->dev_queue, | 1631 | new = qdisc_create_dflt(sch->dev_queue, |
@@ -1641,10 +1648,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1641 | return 0; | 1648 | return 0; |
1642 | } | 1649 | } |
1643 | 1650 | ||
1644 | static struct Qdisc * | 1651 | static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg) |
1645 | cbq_leaf(struct Qdisc *sch, unsigned long arg) | ||
1646 | { | 1652 | { |
1647 | struct cbq_class *cl = (struct cbq_class*)arg; | 1653 | struct cbq_class *cl = (struct cbq_class *)arg; |
1648 | 1654 | ||
1649 | return cl->q; | 1655 | return cl->q; |
1650 | } | 1656 | } |
@@ -1683,13 +1689,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) | |||
1683 | kfree(cl); | 1689 | kfree(cl); |
1684 | } | 1690 | } |
1685 | 1691 | ||
1686 | static void | 1692 | static void cbq_destroy(struct Qdisc *sch) |
1687 | cbq_destroy(struct Qdisc* sch) | ||
1688 | { | 1693 | { |
1689 | struct cbq_sched_data *q = qdisc_priv(sch); | 1694 | struct cbq_sched_data *q = qdisc_priv(sch); |
1690 | struct hlist_node *n, *next; | 1695 | struct hlist_node *n, *next; |
1691 | struct cbq_class *cl; | 1696 | struct cbq_class *cl; |
1692 | unsigned h; | 1697 | unsigned int h; |
1693 | 1698 | ||
1694 | #ifdef CONFIG_NET_CLS_ACT | 1699 | #ifdef CONFIG_NET_CLS_ACT |
1695 | q->rx_class = NULL; | 1700 | q->rx_class = NULL; |
@@ -1713,7 +1718,7 @@ cbq_destroy(struct Qdisc* sch) | |||
1713 | 1718 | ||
1714 | static void cbq_put(struct Qdisc *sch, unsigned long arg) | 1719 | static void cbq_put(struct Qdisc *sch, unsigned long arg) |
1715 | { | 1720 | { |
1716 | struct cbq_class *cl = (struct cbq_class*)arg; | 1721 | struct cbq_class *cl = (struct cbq_class *)arg; |
1717 | 1722 | ||
1718 | if (--cl->refcnt == 0) { | 1723 | if (--cl->refcnt == 0) { |
1719 | #ifdef CONFIG_NET_CLS_ACT | 1724 | #ifdef CONFIG_NET_CLS_ACT |
@@ -1736,7 +1741,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1736 | { | 1741 | { |
1737 | int err; | 1742 | int err; |
1738 | struct cbq_sched_data *q = qdisc_priv(sch); | 1743 | struct cbq_sched_data *q = qdisc_priv(sch); |
1739 | struct cbq_class *cl = (struct cbq_class*)*arg; | 1744 | struct cbq_class *cl = (struct cbq_class *)*arg; |
1740 | struct nlattr *opt = tca[TCA_OPTIONS]; | 1745 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1741 | struct nlattr *tb[TCA_CBQ_MAX + 1]; | 1746 | struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1742 | struct cbq_class *parent; | 1747 | struct cbq_class *parent; |
@@ -1828,13 +1833,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1828 | 1833 | ||
1829 | if (classid) { | 1834 | if (classid) { |
1830 | err = -EINVAL; | 1835 | err = -EINVAL; |
1831 | if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid)) | 1836 | if (TC_H_MAJ(classid ^ sch->handle) || |
1837 | cbq_class_lookup(q, classid)) | ||
1832 | goto failure; | 1838 | goto failure; |
1833 | } else { | 1839 | } else { |
1834 | int i; | 1840 | int i; |
1835 | classid = TC_H_MAKE(sch->handle,0x8000); | 1841 | classid = TC_H_MAKE(sch->handle, 0x8000); |
1836 | 1842 | ||
1837 | for (i=0; i<0x8000; i++) { | 1843 | for (i = 0; i < 0x8000; i++) { |
1838 | if (++q->hgenerator >= 0x8000) | 1844 | if (++q->hgenerator >= 0x8000) |
1839 | q->hgenerator = 1; | 1845 | q->hgenerator = 1; |
1840 | if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) | 1846 | if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) |
@@ -1891,11 +1897,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1891 | cl->minidle = -0x7FFFFFFF; | 1897 | cl->minidle = -0x7FFFFFFF; |
1892 | cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); | 1898 | cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); |
1893 | cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); | 1899 | cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
1894 | if (cl->ewma_log==0) | 1900 | if (cl->ewma_log == 0) |
1895 | cl->ewma_log = q->link.ewma_log; | 1901 | cl->ewma_log = q->link.ewma_log; |
1896 | if (cl->maxidle==0) | 1902 | if (cl->maxidle == 0) |
1897 | cl->maxidle = q->link.maxidle; | 1903 | cl->maxidle = q->link.maxidle; |
1898 | if (cl->avpkt==0) | 1904 | if (cl->avpkt == 0) |
1899 | cl->avpkt = q->link.avpkt; | 1905 | cl->avpkt = q->link.avpkt; |
1900 | cl->overlimit = cbq_ovl_classic; | 1906 | cl->overlimit = cbq_ovl_classic; |
1901 | if (tb[TCA_CBQ_OVL_STRATEGY]) | 1907 | if (tb[TCA_CBQ_OVL_STRATEGY]) |
@@ -1921,7 +1927,7 @@ failure: | |||
1921 | static int cbq_delete(struct Qdisc *sch, unsigned long arg) | 1927 | static int cbq_delete(struct Qdisc *sch, unsigned long arg) |
1922 | { | 1928 | { |
1923 | struct cbq_sched_data *q = qdisc_priv(sch); | 1929 | struct cbq_sched_data *q = qdisc_priv(sch); |
1924 | struct cbq_class *cl = (struct cbq_class*)arg; | 1930 | struct cbq_class *cl = (struct cbq_class *)arg; |
1925 | unsigned int qlen; | 1931 | unsigned int qlen; |
1926 | 1932 | ||
1927 | if (cl->filters || cl->children || cl == &q->link) | 1933 | if (cl->filters || cl->children || cl == &q->link) |
@@ -1979,7 +1985,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, | |||
1979 | u32 classid) | 1985 | u32 classid) |
1980 | { | 1986 | { |
1981 | struct cbq_sched_data *q = qdisc_priv(sch); | 1987 | struct cbq_sched_data *q = qdisc_priv(sch); |
1982 | struct cbq_class *p = (struct cbq_class*)parent; | 1988 | struct cbq_class *p = (struct cbq_class *)parent; |
1983 | struct cbq_class *cl = cbq_class_lookup(q, classid); | 1989 | struct cbq_class *cl = cbq_class_lookup(q, classid); |
1984 | 1990 | ||
1985 | if (cl) { | 1991 | if (cl) { |
@@ -1993,7 +1999,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, | |||
1993 | 1999 | ||
1994 | static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) | 2000 | static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) |
1995 | { | 2001 | { |
1996 | struct cbq_class *cl = (struct cbq_class*)arg; | 2002 | struct cbq_class *cl = (struct cbq_class *)arg; |
1997 | 2003 | ||
1998 | cl->filters--; | 2004 | cl->filters--; |
1999 | } | 2005 | } |
@@ -2003,7 +2009,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
2003 | struct cbq_sched_data *q = qdisc_priv(sch); | 2009 | struct cbq_sched_data *q = qdisc_priv(sch); |
2004 | struct cbq_class *cl; | 2010 | struct cbq_class *cl; |
2005 | struct hlist_node *n; | 2011 | struct hlist_node *n; |
2006 | unsigned h; | 2012 | unsigned int h; |
2007 | 2013 | ||
2008 | if (arg->stop) | 2014 | if (arg->stop) |
2009 | return; | 2015 | return; |
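
The sch_cbq.c hunks above are almost entirely mechanical CodingStyle fixes: a space before the '*' in pointer casts, 'unsigned int' instead of bare 'unsigned', and spaces around binary operators such as '^'. A minimal userspace sketch of the before/after pattern (the struct and function names are illustrative stand-ins, not the kernel's):

#include <stdio.h>

struct cbq_class_demo { int refcnt; };	/* stand-in, not the kernel struct */

static void class_put(unsigned long arg)
{
	/* old style:  (struct cbq_class_demo*)arg;   unsigned h; */
	struct cbq_class_demo *cl = (struct cbq_class_demo *)arg;	/* space before '*' in casts */
	unsigned int h = 0;						/* spell out 'unsigned int' */

	if (--cl->refcnt == 0)
		printf("last reference dropped (bucket %u)\n", h);
}

int main(void)
{
	struct cbq_class_demo c = { 1 };

	class_put((unsigned long)&c);
	return 0;
}
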
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 60f4bdd4408e..4970d56b4aa7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
137 | mask = nla_get_u8(tb[TCA_DSMARK_MASK]); | 137 | mask = nla_get_u8(tb[TCA_DSMARK_MASK]); |
138 | 138 | ||
139 | if (tb[TCA_DSMARK_VALUE]) | 139 | if (tb[TCA_DSMARK_VALUE]) |
140 | p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); | 140 | p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); |
141 | 141 | ||
142 | if (tb[TCA_DSMARK_MASK]) | 142 | if (tb[TCA_DSMARK_MASK]) |
143 | p->mask[*arg-1] = mask; | 143 | p->mask[*arg - 1] = mask; |
144 | 144 | ||
145 | err = 0; | 145 | err = 0; |
146 | 146 | ||
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg) | |||
155 | if (!dsmark_valid_index(p, arg)) | 155 | if (!dsmark_valid_index(p, arg)) |
156 | return -EINVAL; | 156 | return -EINVAL; |
157 | 157 | ||
158 | p->mask[arg-1] = 0xff; | 158 | p->mask[arg - 1] = 0xff; |
159 | p->value[arg-1] = 0; | 159 | p->value[arg - 1] = 0; |
160 | 160 | ||
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
175 | if (p->mask[i] == 0xff && !p->value[i]) | 175 | if (p->mask[i] == 0xff && !p->value[i]) |
176 | goto ignore; | 176 | goto ignore; |
177 | if (walker->count >= walker->skip) { | 177 | if (walker->count >= walker->skip) { |
178 | if (walker->fn(sch, i+1, walker) < 0) { | 178 | if (walker->fn(sch, i + 1, walker) < 0) { |
179 | walker->stop = 1; | 179 | walker->stop = 1; |
180 | break; | 180 | break; |
181 | } | 181 | } |
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) | |||
304 | * and don't need yet another qdisc as a bypass. | 304 | * and don't need yet another qdisc as a bypass. |
305 | */ | 305 | */ |
306 | if (p->mask[index] != 0xff || p->value[index]) | 306 | if (p->mask[index] != 0xff || p->value[index]) |
307 | printk(KERN_WARNING | 307 | pr_warning("dsmark_dequeue: unsupported protocol %d\n", |
308 | "dsmark_dequeue: unsupported protocol %d\n", | 308 | ntohs(skb->protocol)); |
309 | ntohs(skb->protocol)); | ||
310 | break; | 309 | break; |
311 | } | 310 | } |
312 | 311 | ||
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, | |||
424 | if (!dsmark_valid_index(p, cl)) | 423 | if (!dsmark_valid_index(p, cl)) |
425 | return -EINVAL; | 424 | return -EINVAL; |
426 | 425 | ||
427 | tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); | 426 | tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1); |
428 | tcm->tcm_info = p->q->handle; | 427 | tcm->tcm_info = p->q->handle; |
429 | 428 | ||
430 | opts = nla_nest_start(skb, TCA_OPTIONS); | 429 | opts = nla_nest_start(skb, TCA_OPTIONS); |
431 | if (opts == NULL) | 430 | if (opts == NULL) |
432 | goto nla_put_failure; | 431 | goto nla_put_failure; |
433 | NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); | 432 | NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); |
434 | NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); | 433 | NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); |
435 | 434 | ||
436 | return nla_nest_end(skb, opts); | 435 | return nla_nest_end(skb, opts); |
437 | 436 | ||
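
Besides the spacing fixes around the index arithmetic (cl - 1, *arg - 1), the dsmark hunk replaces a multi-line printk(KERN_WARNING ...) with a single pr_warning() call. In-tree, pr_warning() is a thin macro over printk(KERN_WARNING ...) from <linux/kernel.h>; the stand-ins below exist only so the pattern compiles as a userspace sketch:

#include <stdio.h>

/* Userspace stand-ins; the real macro also runs the format through pr_fmt(). */
#define KERN_WARNING		"<4>"
#define printk(...)		fprintf(stderr, __VA_ARGS__)
#define pr_warning(fmt, ...)	printk(KERN_WARNING fmt, ##__VA_ARGS__)

int main(void)
{
	int proto = 0x0800;

	/* before: level constant and format string spread over three source lines */
	printk(KERN_WARNING "dsmark_dequeue: unsupported protocol %d\n", proto);

	/* after: one pr_warning() call, arguments kept with the format string */
	pr_warning("dsmark_dequeue: unsupported protocol %d\n", proto);
	return 0;
}
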
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index aa4d6337e43c..b3075f8a196b 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -19,12 +19,11 @@ | |||
19 | 19 | ||
20 | /* 1 band FIFO pseudo-"scheduler" */ | 20 | /* 1 band FIFO pseudo-"scheduler" */ |
21 | 21 | ||
22 | struct fifo_sched_data | 22 | struct fifo_sched_data { |
23 | { | ||
24 | u32 limit; | 23 | u32 limit; |
25 | }; | 24 | }; |
26 | 25 | ||
27 | static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 26 | static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
28 | { | 27 | { |
29 | struct fifo_sched_data *q = qdisc_priv(sch); | 28 | struct fifo_sched_data *q = qdisc_priv(sch); |
30 | 29 | ||
@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
34 | return qdisc_reshape_fail(skb, sch); | 33 | return qdisc_reshape_fail(skb, sch); |
35 | } | 34 | } |
36 | 35 | ||
37 | static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 36 | static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
38 | { | 37 | { |
39 | struct fifo_sched_data *q = qdisc_priv(sch); | 38 | struct fifo_sched_data *q = qdisc_priv(sch); |
40 | 39 | ||
@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
44 | return qdisc_reshape_fail(skb, sch); | 43 | return qdisc_reshape_fail(skb, sch); |
45 | } | 44 | } |
46 | 45 | ||
47 | static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 46 | static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
48 | { | 47 | { |
49 | struct sk_buff *skb_head; | 48 | struct sk_buff *skb_head; |
50 | struct fifo_sched_data *q = qdisc_priv(sch); | 49 | struct fifo_sched_data *q = qdisc_priv(sch); |
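
The fifo change is pure declaration style: the opening brace of a struct goes on the same line as the tag, and the '*' binds to the parameter name (struct Qdisc *sch) rather than floating after the type. A compile-checkable sketch with a userspace stand-in for the u32 typedef:

#include <stdint.h>

typedef uint32_t u32;		/* stand-in for the kernel typedef */

/* before:
 *	struct fifo_sched_data
 *	{
 *		u32 limit;
 *	};
 */
struct fifo_sched_data {	/* after: brace on the same line as the tag */
	u32 limit;
};

int main(void)
{
	struct fifo_sched_data q = { .limit = 1000 };

	return q.limit ? 0 : 1;
}
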
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 723b27849a50..2f1cb62130da 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
87 | */ | 87 | */ |
88 | kfree_skb(skb); | 88 | kfree_skb(skb); |
89 | if (net_ratelimit()) | 89 | if (net_ratelimit()) |
90 | printk(KERN_WARNING "Dead loop on netdevice %s, " | 90 | pr_warning("Dead loop on netdevice %s, fix it urgently!\n", |
91 | "fix it urgently!\n", dev_queue->dev->name); | 91 | dev_queue->dev->name); |
92 | ret = qdisc_qlen(q); | 92 | ret = qdisc_qlen(q); |
93 | } else { | 93 | } else { |
94 | /* | 94 | /* |
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
137 | } else { | 137 | } else { |
138 | /* Driver returned NETDEV_TX_BUSY - requeue skb */ | 138 | /* Driver returned NETDEV_TX_BUSY - requeue skb */ |
139 | if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) | 139 | if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) |
140 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", | 140 | pr_warning("BUG %s code %d qlen %d\n", |
141 | dev->name, ret, q->q.qlen); | 141 | dev->name, ret, q->q.qlen); |
142 | 142 | ||
143 | ret = dev_requeue_skb(skb, q); | 143 | ret = dev_requeue_skb(skb, q); |
144 | } | 144 | } |
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = { | |||
412 | }; | 412 | }; |
413 | 413 | ||
414 | 414 | ||
415 | static const u8 prio2band[TC_PRIO_MAX+1] = | 415 | static const u8 prio2band[TC_PRIO_MAX + 1] = { |
416 | { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; | 416 | 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 |
417 | }; | ||
417 | 418 | ||
418 | /* 3-band FIFO queue: old style, but should be a bit faster than | 419 | /* 3-band FIFO queue: old style, but should be a bit faster than |
419 | generic prio+fifo combination. | 420 | generic prio+fifo combination. |
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, | |||
445 | return priv->q + band; | 446 | return priv->q + band; |
446 | } | 447 | } |
447 | 448 | ||
448 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | 449 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) |
449 | { | 450 | { |
450 | if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { | 451 | if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { |
451 | int band = prio2band[skb->priority & TC_PRIO_MAX]; | 452 | int band = prio2band[skb->priority & TC_PRIO_MAX]; |
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | |||
460 | return qdisc_drop(skb, qdisc); | 461 | return qdisc_drop(skb, qdisc); |
461 | } | 462 | } |
462 | 463 | ||
463 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | 464 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) |
464 | { | 465 | { |
465 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | 466 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
466 | int band = bitmap2band[priv->bitmap]; | 467 | int band = bitmap2band[priv->bitmap]; |
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | |||
479 | return NULL; | 480 | return NULL; |
480 | } | 481 | } |
481 | 482 | ||
482 | static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | 483 | static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) |
483 | { | 484 | { |
484 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | 485 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
485 | int band = bitmap2band[priv->bitmap]; | 486 | int band = bitmap2band[priv->bitmap]; |
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | |||
493 | return NULL; | 494 | return NULL; |
494 | } | 495 | } |
495 | 496 | ||
496 | static void pfifo_fast_reset(struct Qdisc* qdisc) | 497 | static void pfifo_fast_reset(struct Qdisc *qdisc) |
497 | { | 498 | { |
498 | int prio; | 499 | int prio; |
499 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | 500 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) | |||
510 | { | 511 | { |
511 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; | 512 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; |
512 | 513 | ||
513 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); | 514 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); |
514 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 515 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
515 | return skb->len; | 516 | return skb->len; |
516 | 517 | ||
@@ -681,20 +682,18 @@ static void attach_one_default_qdisc(struct net_device *dev, | |||
681 | struct netdev_queue *dev_queue, | 682 | struct netdev_queue *dev_queue, |
682 | void *_unused) | 683 | void *_unused) |
683 | { | 684 | { |
684 | struct Qdisc *qdisc; | 685 | struct Qdisc *qdisc = &noqueue_qdisc; |
685 | 686 | ||
686 | if (dev->tx_queue_len) { | 687 | if (dev->tx_queue_len) { |
687 | qdisc = qdisc_create_dflt(dev_queue, | 688 | qdisc = qdisc_create_dflt(dev_queue, |
688 | &pfifo_fast_ops, TC_H_ROOT); | 689 | &pfifo_fast_ops, TC_H_ROOT); |
689 | if (!qdisc) { | 690 | if (!qdisc) { |
690 | printk(KERN_INFO "%s: activation failed\n", dev->name); | 691 | netdev_info(dev, "activation failed\n"); |
691 | return; | 692 | return; |
692 | } | 693 | } |
693 | 694 | ||
694 | /* Can by-pass the queue discipline for default qdisc */ | 695 | /* Can by-pass the queue discipline for default qdisc */ |
695 | qdisc->flags |= TCQ_F_CAN_BYPASS; | 696 | qdisc->flags |= TCQ_F_CAN_BYPASS; |
696 | } else { | ||
697 | qdisc = &noqueue_qdisc; | ||
698 | } | 697 | } |
699 | dev_queue->qdisc_sleeping = qdisc; | 698 | dev_queue->qdisc_sleeping = qdisc; |
700 | } | 699 | } |
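
In sch_generic.c the printk/netdev_info conversions are joined by a small control-flow cleanup: attach_one_default_qdisc() now initialises its local to &noqueue_qdisc up front, so the else branch that used to assign the fallback disappears. A userspace sketch of the same shape (names below are illustrative, not the kernel's):

#include <stdio.h>

struct qdisc_demo { const char *name; };

static struct qdisc_demo noqueue    = { "noqueue" };
static struct qdisc_demo pfifo_fast = { "pfifo_fast" };

static struct qdisc_demo *pick_default(unsigned int tx_queue_len)
{
	struct qdisc_demo *q = &noqueue;	/* fallback chosen at declaration ... */

	if (tx_queue_len)
		q = &pfifo_fast;		/* ... so no else branch is needed */
	return q;
}

int main(void)
{
	printf("%s\n", pick_default(1000)->name);	/* pfifo_fast */
	printf("%s\n", pick_default(0)->name);		/* noqueue */
	return 0;
}
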
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 51dcc2aa5c92..b9493a09a870 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -32,8 +32,7 @@ | |||
32 | struct gred_sched_data; | 32 | struct gred_sched_data; |
33 | struct gred_sched; | 33 | struct gred_sched; |
34 | 34 | ||
35 | struct gred_sched_data | 35 | struct gred_sched_data { |
36 | { | ||
37 | u32 limit; /* HARD maximal queue length */ | 36 | u32 limit; /* HARD maximal queue length */ |
38 | u32 DP; /* the drop pramaters */ | 37 | u32 DP; /* the drop pramaters */ |
39 | u32 bytesin; /* bytes seen on virtualQ so far*/ | 38 | u32 bytesin; /* bytes seen on virtualQ so far*/ |
@@ -50,8 +49,7 @@ enum { | |||
50 | GRED_RIO_MODE, | 49 | GRED_RIO_MODE, |
51 | }; | 50 | }; |
52 | 51 | ||
53 | struct gred_sched | 52 | struct gred_sched { |
54 | { | ||
55 | struct gred_sched_data *tab[MAX_DPs]; | 53 | struct gred_sched_data *tab[MAX_DPs]; |
56 | unsigned long flags; | 54 | unsigned long flags; |
57 | u32 red_flags; | 55 | u32 red_flags; |
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t) | |||
150 | return t->red_flags & TC_RED_HARDDROP; | 148 | return t->red_flags & TC_RED_HARDDROP; |
151 | } | 149 | } |
152 | 150 | ||
153 | static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 151 | static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
154 | { | 152 | { |
155 | struct gred_sched_data *q=NULL; | 153 | struct gred_sched_data *q = NULL; |
156 | struct gred_sched *t= qdisc_priv(sch); | 154 | struct gred_sched *t = qdisc_priv(sch); |
157 | unsigned long qavg = 0; | 155 | unsigned long qavg = 0; |
158 | u16 dp = tc_index_to_dp(skb); | 156 | u16 dp = tc_index_to_dp(skb); |
159 | 157 | ||
160 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { | 158 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { |
161 | dp = t->def; | 159 | dp = t->def; |
162 | 160 | ||
163 | if ((q = t->tab[dp]) == NULL) { | 161 | q = t->tab[dp]; |
162 | if (!q) { | ||
164 | /* Pass through packets not assigned to a DP | 163 | /* Pass through packets not assigned to a DP |
165 | * if no default DP has been configured. This | 164 | * if no default DP has been configured. This |
166 | * allows for DP flows to be left untouched. | 165 | * allows for DP flows to be left untouched. |
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
183 | for (i = 0; i < t->DPs; i++) { | 182 | for (i = 0; i < t->DPs; i++) { |
184 | if (t->tab[i] && t->tab[i]->prio < q->prio && | 183 | if (t->tab[i] && t->tab[i]->prio < q->prio && |
185 | !red_is_idling(&t->tab[i]->parms)) | 184 | !red_is_idling(&t->tab[i]->parms)) |
186 | qavg +=t->tab[i]->parms.qavg; | 185 | qavg += t->tab[i]->parms.qavg; |
187 | } | 186 | } |
188 | 187 | ||
189 | } | 188 | } |
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
203 | gred_store_wred_set(t, q); | 202 | gred_store_wred_set(t, q); |
204 | 203 | ||
205 | switch (red_action(&q->parms, q->parms.qavg + qavg)) { | 204 | switch (red_action(&q->parms, q->parms.qavg + qavg)) { |
206 | case RED_DONT_MARK: | 205 | case RED_DONT_MARK: |
207 | break; | 206 | break; |
208 | 207 | ||
209 | case RED_PROB_MARK: | 208 | case RED_PROB_MARK: |
210 | sch->qstats.overlimits++; | 209 | sch->qstats.overlimits++; |
211 | if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { | 210 | if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { |
212 | q->stats.prob_drop++; | 211 | q->stats.prob_drop++; |
213 | goto congestion_drop; | 212 | goto congestion_drop; |
214 | } | 213 | } |
215 | 214 | ||
216 | q->stats.prob_mark++; | 215 | q->stats.prob_mark++; |
217 | break; | 216 | break; |
218 | 217 | ||
219 | case RED_HARD_MARK: | 218 | case RED_HARD_MARK: |
220 | sch->qstats.overlimits++; | 219 | sch->qstats.overlimits++; |
221 | if (gred_use_harddrop(t) || !gred_use_ecn(t) || | 220 | if (gred_use_harddrop(t) || !gred_use_ecn(t) || |
222 | !INET_ECN_set_ce(skb)) { | 221 | !INET_ECN_set_ce(skb)) { |
223 | q->stats.forced_drop++; | 222 | q->stats.forced_drop++; |
224 | goto congestion_drop; | 223 | goto congestion_drop; |
225 | } | 224 | } |
226 | q->stats.forced_mark++; | 225 | q->stats.forced_mark++; |
227 | break; | 226 | break; |
228 | } | 227 | } |
229 | 228 | ||
230 | if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { | 229 | if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { |
@@ -241,7 +240,7 @@ congestion_drop: | |||
241 | return NET_XMIT_CN; | 240 | return NET_XMIT_CN; |
242 | } | 241 | } |
243 | 242 | ||
244 | static struct sk_buff *gred_dequeue(struct Qdisc* sch) | 243 | static struct sk_buff *gred_dequeue(struct Qdisc *sch) |
245 | { | 244 | { |
246 | struct sk_buff *skb; | 245 | struct sk_buff *skb; |
247 | struct gred_sched *t = qdisc_priv(sch); | 246 | struct gred_sched *t = qdisc_priv(sch); |
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch) | |||
254 | 253 | ||
255 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { | 254 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { |
256 | if (net_ratelimit()) | 255 | if (net_ratelimit()) |
257 | printk(KERN_WARNING "GRED: Unable to relocate " | 256 | pr_warning("GRED: Unable to relocate VQ 0x%x " |
258 | "VQ 0x%x after dequeue, screwing up " | 257 | "after dequeue, screwing up " |
259 | "backlog.\n", tc_index_to_dp(skb)); | 258 | "backlog.\n", tc_index_to_dp(skb)); |
260 | } else { | 259 | } else { |
261 | q->backlog -= qdisc_pkt_len(skb); | 260 | q->backlog -= qdisc_pkt_len(skb); |
262 | 261 | ||
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch) | |||
273 | return NULL; | 272 | return NULL; |
274 | } | 273 | } |
275 | 274 | ||
276 | static unsigned int gred_drop(struct Qdisc* sch) | 275 | static unsigned int gred_drop(struct Qdisc *sch) |
277 | { | 276 | { |
278 | struct sk_buff *skb; | 277 | struct sk_buff *skb; |
279 | struct gred_sched *t = qdisc_priv(sch); | 278 | struct gred_sched *t = qdisc_priv(sch); |
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch) | |||
286 | 285 | ||
287 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { | 286 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { |
288 | if (net_ratelimit()) | 287 | if (net_ratelimit()) |
289 | printk(KERN_WARNING "GRED: Unable to relocate " | 288 | pr_warning("GRED: Unable to relocate VQ 0x%x " |
290 | "VQ 0x%x while dropping, screwing up " | 289 | "while dropping, screwing up " |
291 | "backlog.\n", tc_index_to_dp(skb)); | 290 | "backlog.\n", tc_index_to_dp(skb)); |
292 | } else { | 291 | } else { |
293 | q->backlog -= len; | 292 | q->backlog -= len; |
294 | q->stats.other++; | 293 | q->stats.other++; |
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch) | |||
308 | 307 | ||
309 | } | 308 | } |
310 | 309 | ||
311 | static void gred_reset(struct Qdisc* sch) | 310 | static void gred_reset(struct Qdisc *sch) |
312 | { | 311 | { |
313 | int i; | 312 | int i; |
314 | struct gred_sched *t = qdisc_priv(sch); | 313 | struct gred_sched *t = qdisc_priv(sch); |
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) | |||
369 | 368 | ||
370 | for (i = table->DPs; i < MAX_DPs; i++) { | 369 | for (i = table->DPs; i < MAX_DPs; i++) { |
371 | if (table->tab[i]) { | 370 | if (table->tab[i]) { |
372 | printk(KERN_WARNING "GRED: Warning: Destroying " | 371 | pr_warning("GRED: Warning: Destroying " |
373 | "shadowed VQ 0x%x\n", i); | 372 | "shadowed VQ 0x%x\n", i); |
374 | gred_destroy_vq(table->tab[i]); | 373 | gred_destroy_vq(table->tab[i]); |
375 | table->tab[i] = NULL; | 374 | table->tab[i] = NULL; |
376 | } | 375 | } |
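
Two patterns recur in the gred hunks: the switch/case blocks re-indented to the kernel's preferred level, and assignments hoisted out of if conditions, e.g. "if ((q = t->tab[dp]) == NULL)" becoming a plain assignment followed by "if (!q)". A small sketch of the latter (the array and struct names are made up for illustration):

#include <stdio.h>

struct vq_demo { int backlog; };

static struct vq_demo *vq_lookup(struct vq_demo **tab, unsigned int dp)
{
	struct vq_demo *q;

	/* before:
	 *	if ((q = tab[dp]) == NULL)
	 *		return NULL;
	 */
	q = tab[dp];		/* after: assignment and test on separate lines */
	if (!q)
		return NULL;
	return q;
}

int main(void)
{
	struct vq_demo def = { 0 };
	struct vq_demo *tab[2] = { NULL, &def };

	printf("%p %p\n", (void *)vq_lookup(tab, 0), (void *)vq_lookup(tab, 1));
	return 0;
}
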
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 2e45791d4f6c..dea4009615f9 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -81,8 +81,7 @@ | |||
81 | * that are expensive on 32-bit architectures. | 81 | * that are expensive on 32-bit architectures. |
82 | */ | 82 | */ |
83 | 83 | ||
84 | struct internal_sc | 84 | struct internal_sc { |
85 | { | ||
86 | u64 sm1; /* scaled slope of the 1st segment */ | 85 | u64 sm1; /* scaled slope of the 1st segment */ |
87 | u64 ism1; /* scaled inverse-slope of the 1st segment */ | 86 | u64 ism1; /* scaled inverse-slope of the 1st segment */ |
88 | u64 dx; /* the x-projection of the 1st segment */ | 87 | u64 dx; /* the x-projection of the 1st segment */ |
@@ -92,8 +91,7 @@ struct internal_sc | |||
92 | }; | 91 | }; |
93 | 92 | ||
94 | /* runtime service curve */ | 93 | /* runtime service curve */ |
95 | struct runtime_sc | 94 | struct runtime_sc { |
96 | { | ||
97 | u64 x; /* current starting position on x-axis */ | 95 | u64 x; /* current starting position on x-axis */ |
98 | u64 y; /* current starting position on y-axis */ | 96 | u64 y; /* current starting position on y-axis */ |
99 | u64 sm1; /* scaled slope of the 1st segment */ | 97 | u64 sm1; /* scaled slope of the 1st segment */ |
@@ -104,15 +102,13 @@ struct runtime_sc | |||
104 | u64 ism2; /* scaled inverse-slope of the 2nd segment */ | 102 | u64 ism2; /* scaled inverse-slope of the 2nd segment */ |
105 | }; | 103 | }; |
106 | 104 | ||
107 | enum hfsc_class_flags | 105 | enum hfsc_class_flags { |
108 | { | ||
109 | HFSC_RSC = 0x1, | 106 | HFSC_RSC = 0x1, |
110 | HFSC_FSC = 0x2, | 107 | HFSC_FSC = 0x2, |
111 | HFSC_USC = 0x4 | 108 | HFSC_USC = 0x4 |
112 | }; | 109 | }; |
113 | 110 | ||
114 | struct hfsc_class | 111 | struct hfsc_class { |
115 | { | ||
116 | struct Qdisc_class_common cl_common; | 112 | struct Qdisc_class_common cl_common; |
117 | unsigned int refcnt; /* usage count */ | 113 | unsigned int refcnt; /* usage count */ |
118 | 114 | ||
@@ -140,8 +136,8 @@ struct hfsc_class | |||
140 | u64 cl_cumul; /* cumulative work in bytes done by | 136 | u64 cl_cumul; /* cumulative work in bytes done by |
141 | real-time criteria */ | 137 | real-time criteria */ |
142 | 138 | ||
143 | u64 cl_d; /* deadline*/ | 139 | u64 cl_d; /* deadline*/ |
144 | u64 cl_e; /* eligible time */ | 140 | u64 cl_e; /* eligible time */ |
145 | u64 cl_vt; /* virtual time */ | 141 | u64 cl_vt; /* virtual time */ |
146 | u64 cl_f; /* time when this class will fit for | 142 | u64 cl_f; /* time when this class will fit for |
147 | link-sharing, max(myf, cfmin) */ | 143 | link-sharing, max(myf, cfmin) */ |
@@ -176,8 +172,7 @@ struct hfsc_class | |||
176 | unsigned long cl_nactive; /* number of active children */ | 172 | unsigned long cl_nactive; /* number of active children */ |
177 | }; | 173 | }; |
178 | 174 | ||
179 | struct hfsc_sched | 175 | struct hfsc_sched { |
180 | { | ||
181 | u16 defcls; /* default class id */ | 176 | u16 defcls; /* default class id */ |
182 | struct hfsc_class root; /* root class */ | 177 | struct hfsc_class root; /* root class */ |
183 | struct Qdisc_class_hash clhash; /* class hash */ | 178 | struct Qdisc_class_hash clhash; /* class hash */ |
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len) | |||
693 | if (go_active) { | 688 | if (go_active) { |
694 | n = rb_last(&cl->cl_parent->vt_tree); | 689 | n = rb_last(&cl->cl_parent->vt_tree); |
695 | if (n != NULL) { | 690 | if (n != NULL) { |
696 | max_cl = rb_entry(n, struct hfsc_class,vt_node); | 691 | max_cl = rb_entry(n, struct hfsc_class, vt_node); |
697 | /* | 692 | /* |
698 | * set vt to the average of the min and max | 693 | * set vt to the average of the min and max |
699 | * classes. if the parent's period didn't | 694 | * classes. if the parent's period didn't |
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1177 | return NULL; | 1172 | return NULL; |
1178 | } | 1173 | } |
1179 | #endif | 1174 | #endif |
1180 | if ((cl = (struct hfsc_class *)res.class) == NULL) { | 1175 | cl = (struct hfsc_class *)res.class; |
1181 | if ((cl = hfsc_find_class(res.classid, sch)) == NULL) | 1176 | if (!cl) { |
1177 | cl = hfsc_find_class(res.classid, sch); | ||
1178 | if (!cl) | ||
1182 | break; /* filter selected invalid classid */ | 1179 | break; /* filter selected invalid classid */ |
1183 | if (cl->level >= head->level) | 1180 | if (cl->level >= head->level) |
1184 | break; /* filter may only point downwards */ | 1181 | break; /* filter may only point downwards */ |
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) | |||
1316 | return -1; | 1313 | return -1; |
1317 | } | 1314 | } |
1318 | 1315 | ||
1319 | static inline int | 1316 | static int |
1320 | hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) | 1317 | hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) |
1321 | { | 1318 | { |
1322 | if ((cl->cl_flags & HFSC_RSC) && | 1319 | if ((cl->cl_flags & HFSC_RSC) && |
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch) | |||
1420 | struct hfsc_class *cl; | 1417 | struct hfsc_class *cl; |
1421 | u64 next_time = 0; | 1418 | u64 next_time = 0; |
1422 | 1419 | ||
1423 | if ((cl = eltree_get_minel(q)) != NULL) | 1420 | cl = eltree_get_minel(q); |
1421 | if (cl) | ||
1424 | next_time = cl->cl_e; | 1422 | next_time = cl->cl_e; |
1425 | if (q->root.cl_cfmin != 0) { | 1423 | if (q->root.cl_cfmin != 0) { |
1426 | if (next_time == 0 || next_time > q->root.cl_cfmin) | 1424 | if (next_time == 0 || next_time > q->root.cl_cfmin) |
@@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch) | |||
1626 | * find the class with the minimum deadline among | 1624 | * find the class with the minimum deadline among |
1627 | * the eligible classes. | 1625 | * the eligible classes. |
1628 | */ | 1626 | */ |
1629 | if ((cl = eltree_get_mindl(q, cur_time)) != NULL) { | 1627 | cl = eltree_get_mindl(q, cur_time); |
1628 | if (cl) { | ||
1630 | realtime = 1; | 1629 | realtime = 1; |
1631 | } else { | 1630 | } else { |
1632 | /* | 1631 | /* |
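
The hfsc hunks apply the same ideas at slightly larger scale: struct and enum definitions get their brace on the tag line, hfsc_dump_curves() drops its explicit 'inline' (left to the compiler), and the nested assignment-in-if in hfsc_classify() is unrolled into two explicit lookups. A sketch of that two-step lookup, using stub types and a stub finder rather than the real hfsc_find_class():

#include <stddef.h>

struct hclass_demo { int level; };

/* stub standing in for a classid lookup; always misses */
static struct hclass_demo *find_class_demo(unsigned int classid)
{
	(void)classid;
	return NULL;
}

static struct hclass_demo *resolve(struct hclass_demo *from_filter, unsigned int classid)
{
	struct hclass_demo *cl;

	/* before:
	 *	if ((cl = from_filter) == NULL) {
	 *		if ((cl = find_class_demo(classid)) == NULL)
	 *			return NULL;
	 *	}
	 */
	cl = from_filter;
	if (!cl) {
		cl = find_class_demo(classid);
		if (!cl)
			return NULL;	/* filter selected an invalid classid */
	}
	return cl;
}

int main(void)
{
	struct hclass_demo leaf = { 0 };

	return resolve(&leaf, 1) == &leaf ? 0 : 1;
}
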
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 984c1b0c6836..3e86fd3a1b78 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -99,9 +99,10 @@ struct htb_class { | |||
99 | struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */ | 99 | struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */ |
100 | struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */ | 100 | struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */ |
101 | /* When class changes from state 1->2 and disconnects from | 101 | /* When class changes from state 1->2 and disconnects from |
102 | parent's feed then we lost ptr value and start from the | 102 | * parent's feed then we lost ptr value and start from the |
103 | first child again. Here we store classid of the | 103 | * first child again. Here we store classid of the |
104 | last valid ptr (used when ptr is NULL). */ | 104 | * last valid ptr (used when ptr is NULL). |
105 | */ | ||
105 | u32 last_ptr_id[TC_HTB_NUMPRIO]; | 106 | u32 last_ptr_id[TC_HTB_NUMPRIO]; |
106 | } inner; | 107 | } inner; |
107 | } un; | 108 | } un; |
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) | |||
185 | * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull | 186 | * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull |
186 | * then finish and return direct queue. | 187 | * then finish and return direct queue. |
187 | */ | 188 | */ |
188 | #define HTB_DIRECT (struct htb_class*)-1 | 189 | #define HTB_DIRECT ((struct htb_class *)-1L) |
189 | 190 | ||
190 | static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, | 191 | static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, |
191 | int *qerr) | 192 | int *qerr) |
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
197 | int result; | 198 | int result; |
198 | 199 | ||
199 | /* allow to select class by setting skb->priority to valid classid; | 200 | /* allow to select class by setting skb->priority to valid classid; |
200 | note that nfmark can be used too by attaching filter fw with no | 201 | * note that nfmark can be used too by attaching filter fw with no |
201 | rules in it */ | 202 | * rules in it |
203 | */ | ||
202 | if (skb->priority == sch->handle) | 204 | if (skb->priority == sch->handle) |
203 | return HTB_DIRECT; /* X:0 (direct flow) selected */ | 205 | return HTB_DIRECT; /* X:0 (direct flow) selected */ |
204 | if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) | 206 | cl = htb_find(skb->priority, sch); |
207 | if (cl && cl->level == 0) | ||
205 | return cl; | 208 | return cl; |
206 | 209 | ||
207 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 210 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
216 | return NULL; | 219 | return NULL; |
217 | } | 220 | } |
218 | #endif | 221 | #endif |
219 | if ((cl = (void *)res.class) == NULL) { | 222 | cl = (void *)res.class; |
223 | if (!cl) { | ||
220 | if (res.classid == sch->handle) | 224 | if (res.classid == sch->handle) |
221 | return HTB_DIRECT; /* X:0 (direct flow) */ | 225 | return HTB_DIRECT; /* X:0 (direct flow) */ |
222 | if ((cl = htb_find(res.classid, sch)) == NULL) | 226 | cl = htb_find(res.classid, sch); |
227 | if (!cl) | ||
223 | break; /* filter selected invalid classid */ | 228 | break; /* filter selected invalid classid */ |
224 | } | 229 | } |
225 | if (!cl->level) | 230 | if (!cl->level) |
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) | |||
378 | 383 | ||
379 | if (p->un.inner.feed[prio].rb_node) | 384 | if (p->un.inner.feed[prio].rb_node) |
380 | /* parent already has its feed in use so that | 385 | /* parent already has its feed in use so that |
381 | reset bit in mask as parent is already ok */ | 386 | * reset bit in mask as parent is already ok |
387 | */ | ||
382 | mask &= ~(1 << prio); | 388 | mask &= ~(1 << prio); |
383 | 389 | ||
384 | htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); | 390 | htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); |
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) | |||
413 | 419 | ||
414 | if (p->un.inner.ptr[prio] == cl->node + prio) { | 420 | if (p->un.inner.ptr[prio] == cl->node + prio) { |
415 | /* we are removing child which is pointed to from | 421 | /* we are removing child which is pointed to from |
416 | parent feed - forget the pointer but remember | 422 | * parent feed - forget the pointer but remember |
417 | classid */ | 423 | * classid |
424 | */ | ||
418 | p->un.inner.last_ptr_id[prio] = cl->common.classid; | 425 | p->un.inner.last_ptr_id[prio] = cl->common.classid; |
419 | p->un.inner.ptr[prio] = NULL; | 426 | p->un.inner.ptr[prio] = NULL; |
420 | } | 427 | } |
@@ -664,8 +671,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
664 | unsigned long start) | 671 | unsigned long start) |
665 | { | 672 | { |
666 | /* don't run for longer than 2 jiffies; 2 is used instead of | 673 | /* don't run for longer than 2 jiffies; 2 is used instead of |
667 | 1 to simplify things when jiffy is going to be incremented | 674 | * 1 to simplify things when jiffy is going to be incremented |
668 | too soon */ | 675 | * too soon |
676 | */ | ||
669 | unsigned long stop_at = start + 2; | 677 | unsigned long stop_at = start + 2; |
670 | while (time_before(jiffies, stop_at)) { | 678 | while (time_before(jiffies, stop_at)) { |
671 | struct htb_class *cl; | 679 | struct htb_class *cl; |
@@ -688,7 +696,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
688 | 696 | ||
689 | /* too much load - let's continue after a break for scheduling */ | 697 | /* too much load - let's continue after a break for scheduling */ |
690 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { | 698 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { |
691 | printk(KERN_WARNING "htb: too many events!\n"); | 699 | pr_warning("htb: too many events!\n"); |
692 | q->warned |= HTB_WARN_TOOMANYEVENTS; | 700 | q->warned |= HTB_WARN_TOOMANYEVENTS; |
693 | } | 701 | } |
694 | 702 | ||
@@ -696,7 +704,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
696 | } | 704 | } |
697 | 705 | ||
698 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL | 706 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL |
699 | is no such one exists. */ | 707 | * is no such one exists. |
708 | */ | ||
700 | static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, | 709 | static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, |
701 | u32 id) | 710 | u32 id) |
702 | { | 711 | { |
@@ -740,12 +749,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, | |||
740 | for (i = 0; i < 65535; i++) { | 749 | for (i = 0; i < 65535; i++) { |
741 | if (!*sp->pptr && *sp->pid) { | 750 | if (!*sp->pptr && *sp->pid) { |
742 | /* ptr was invalidated but id is valid - try to recover | 751 | /* ptr was invalidated but id is valid - try to recover |
743 | the original or next ptr */ | 752 | * the original or next ptr |
753 | */ | ||
744 | *sp->pptr = | 754 | *sp->pptr = |
745 | htb_id_find_next_upper(prio, sp->root, *sp->pid); | 755 | htb_id_find_next_upper(prio, sp->root, *sp->pid); |
746 | } | 756 | } |
747 | *sp->pid = 0; /* ptr is valid now so that remove this hint as it | 757 | *sp->pid = 0; /* ptr is valid now so that remove this hint as it |
748 | can become out of date quickly */ | 758 | * can become out of date quickly |
759 | */ | ||
749 | if (!*sp->pptr) { /* we are at right end; rewind & go up */ | 760 | if (!*sp->pptr) { /* we are at right end; rewind & go up */ |
750 | *sp->pptr = sp->root; | 761 | *sp->pptr = sp->root; |
751 | while ((*sp->pptr)->rb_left) | 762 | while ((*sp->pptr)->rb_left) |
@@ -773,7 +784,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, | |||
773 | } | 784 | } |
774 | 785 | ||
775 | /* dequeues packet at given priority and level; call only if | 786 | /* dequeues packet at given priority and level; call only if |
776 | you are sure that there is active class at prio/level */ | 787 | * you are sure that there is active class at prio/level |
788 | */ | ||
777 | static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, | 789 | static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, |
778 | int level) | 790 | int level) |
779 | { | 791 | { |
@@ -790,9 +802,10 @@ next: | |||
790 | return NULL; | 802 | return NULL; |
791 | 803 | ||
792 | /* class can be empty - it is unlikely but can be true if leaf | 804 | /* class can be empty - it is unlikely but can be true if leaf |
793 | qdisc drops packets in enqueue routine or if someone used | 805 | * qdisc drops packets in enqueue routine or if someone used |
794 | graft operation on the leaf since last dequeue; | 806 | * graft operation on the leaf since last dequeue; |
795 | simply deactivate and skip such class */ | 807 | * simply deactivate and skip such class |
808 | */ | ||
796 | if (unlikely(cl->un.leaf.q->q.qlen == 0)) { | 809 | if (unlikely(cl->un.leaf.q->q.qlen == 0)) { |
797 | struct htb_class *next; | 810 | struct htb_class *next; |
798 | htb_deactivate(q, cl); | 811 | htb_deactivate(q, cl); |
@@ -832,7 +845,8 @@ next: | |||
832 | ptr[0]) + prio); | 845 | ptr[0]) + prio); |
833 | } | 846 | } |
834 | /* this used to be after charge_class but this constelation | 847 | /* this used to be after charge_class but this constelation |
835 | gives us slightly better performance */ | 848 | * gives us slightly better performance |
849 | */ | ||
836 | if (!cl->un.leaf.q->q.qlen) | 850 | if (!cl->un.leaf.q->q.qlen) |
837 | htb_deactivate(q, cl); | 851 | htb_deactivate(q, cl); |
838 | htb_charge_class(q, cl, level, skb); | 852 | htb_charge_class(q, cl, level, skb); |
@@ -882,6 +896,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) | |||
882 | m = ~q->row_mask[level]; | 896 | m = ~q->row_mask[level]; |
883 | while (m != (int)(-1)) { | 897 | while (m != (int)(-1)) { |
884 | int prio = ffz(m); | 898 | int prio = ffz(m); |
899 | |||
885 | m |= 1 << prio; | 900 | m |= 1 << prio; |
886 | skb = htb_dequeue_tree(q, prio, level); | 901 | skb = htb_dequeue_tree(q, prio, level); |
887 | if (likely(skb != NULL)) { | 902 | if (likely(skb != NULL)) { |
@@ -989,13 +1004,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
989 | return err; | 1004 | return err; |
990 | 1005 | ||
991 | if (tb[TCA_HTB_INIT] == NULL) { | 1006 | if (tb[TCA_HTB_INIT] == NULL) { |
992 | printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); | 1007 | pr_err("HTB: hey probably you have bad tc tool ?\n"); |
993 | return -EINVAL; | 1008 | return -EINVAL; |
994 | } | 1009 | } |
995 | gopt = nla_data(tb[TCA_HTB_INIT]); | 1010 | gopt = nla_data(tb[TCA_HTB_INIT]); |
996 | if (gopt->version != HTB_VER >> 16) { | 1011 | if (gopt->version != HTB_VER >> 16) { |
997 | printk(KERN_ERR | 1012 | pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n", |
998 | "HTB: need tc/htb version %d (minor is %d), you have %d\n", | ||
999 | HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); | 1013 | HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); |
1000 | return -EINVAL; | 1014 | return -EINVAL; |
1001 | } | 1015 | } |
@@ -1208,9 +1222,10 @@ static void htb_destroy(struct Qdisc *sch) | |||
1208 | cancel_work_sync(&q->work); | 1222 | cancel_work_sync(&q->work); |
1209 | qdisc_watchdog_cancel(&q->watchdog); | 1223 | qdisc_watchdog_cancel(&q->watchdog); |
1210 | /* This line used to be after htb_destroy_class call below | 1224 | /* This line used to be after htb_destroy_class call below |
1211 | and surprisingly it worked in 2.4. But it must precede it | 1225 | * and surprisingly it worked in 2.4. But it must precede it |
1212 | because filter need its target class alive to be able to call | 1226 | * because filter need its target class alive to be able to call |
1213 | unbind_filter on it (without Oops). */ | 1227 | * unbind_filter on it (without Oops). |
1228 | */ | ||
1214 | tcf_destroy_chain(&q->filter_list); | 1229 | tcf_destroy_chain(&q->filter_list); |
1215 | 1230 | ||
1216 | for (i = 0; i < q->clhash.hashsize; i++) { | 1231 | for (i = 0; i < q->clhash.hashsize; i++) { |
@@ -1344,11 +1359,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1344 | 1359 | ||
1345 | /* check maximal depth */ | 1360 | /* check maximal depth */ |
1346 | if (parent && parent->parent && parent->parent->level < 2) { | 1361 | if (parent && parent->parent && parent->parent->level < 2) { |
1347 | printk(KERN_ERR "htb: tree is too deep\n"); | 1362 | pr_err("htb: tree is too deep\n"); |
1348 | goto failure; | 1363 | goto failure; |
1349 | } | 1364 | } |
1350 | err = -ENOBUFS; | 1365 | err = -ENOBUFS; |
1351 | if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) | 1366 | cl = kzalloc(sizeof(*cl), GFP_KERNEL); |
1367 | if (!cl) | ||
1352 | goto failure; | 1368 | goto failure; |
1353 | 1369 | ||
1354 | err = gen_new_estimator(&cl->bstats, &cl->rate_est, | 1370 | err = gen_new_estimator(&cl->bstats, &cl->rate_est, |
@@ -1368,8 +1384,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1368 | RB_CLEAR_NODE(&cl->node[prio]); | 1384 | RB_CLEAR_NODE(&cl->node[prio]); |
1369 | 1385 | ||
1370 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) | 1386 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) |
1371 | so that can't be used inside of sch_tree_lock | 1387 | * so that can't be used inside of sch_tree_lock |
1372 | -- thanks to Karlis Peisenieks */ | 1388 | * -- thanks to Karlis Peisenieks |
1389 | */ | ||
1373 | new_q = qdisc_create_dflt(sch->dev_queue, | 1390 | new_q = qdisc_create_dflt(sch->dev_queue, |
1374 | &pfifo_qdisc_ops, classid); | 1391 | &pfifo_qdisc_ops, classid); |
1375 | sch_tree_lock(sch); | 1392 | sch_tree_lock(sch); |
@@ -1421,17 +1438,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1421 | } | 1438 | } |
1422 | 1439 | ||
1423 | /* it used to be a nasty bug here, we have to check that node | 1440 | /* it used to be a nasty bug here, we have to check that node |
1424 | is really leaf before changing cl->un.leaf ! */ | 1441 | * is really leaf before changing cl->un.leaf ! |
1442 | */ | ||
1425 | if (!cl->level) { | 1443 | if (!cl->level) { |
1426 | cl->quantum = rtab->rate.rate / q->rate2quantum; | 1444 | cl->quantum = rtab->rate.rate / q->rate2quantum; |
1427 | if (!hopt->quantum && cl->quantum < 1000) { | 1445 | if (!hopt->quantum && cl->quantum < 1000) { |
1428 | printk(KERN_WARNING | 1446 | pr_warning( |
1429 | "HTB: quantum of class %X is small. Consider r2q change.\n", | 1447 | "HTB: quantum of class %X is small. Consider r2q change.\n", |
1430 | cl->common.classid); | 1448 | cl->common.classid); |
1431 | cl->quantum = 1000; | 1449 | cl->quantum = 1000; |
1432 | } | 1450 | } |
1433 | if (!hopt->quantum && cl->quantum > 200000) { | 1451 | if (!hopt->quantum && cl->quantum > 200000) { |
1434 | printk(KERN_WARNING | 1452 | pr_warning( |
1435 | "HTB: quantum of class %X is big. Consider r2q change.\n", | 1453 | "HTB: quantum of class %X is big. Consider r2q change.\n", |
1436 | cl->common.classid); | 1454 | cl->common.classid); |
1437 | cl->quantum = 200000; | 1455 | cl->quantum = 200000; |
@@ -1480,13 +1498,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, | |||
1480 | struct htb_class *cl = htb_find(classid, sch); | 1498 | struct htb_class *cl = htb_find(classid, sch); |
1481 | 1499 | ||
1482 | /*if (cl && !cl->level) return 0; | 1500 | /*if (cl && !cl->level) return 0; |
1483 | The line above used to be there to prevent attaching filters to | 1501 | * The line above used to be there to prevent attaching filters to |
1484 | leaves. But at least tc_index filter uses this just to get class | 1502 | * leaves. But at least tc_index filter uses this just to get class |
1485 | for other reasons so that we have to allow for it. | 1503 | * for other reasons so that we have to allow for it. |
1486 | ---- | 1504 | * ---- |
1487 | 19.6.2002 As Werner explained it is ok - bind filter is just | 1505 | * 19.6.2002 As Werner explained it is ok - bind filter is just |
1488 | another way to "lock" the class - unlike "get" this lock can | 1506 | * another way to "lock" the class - unlike "get" this lock can |
1489 | be broken by class during destroy IIUC. | 1507 | * be broken by class during destroy IIUC. |
1490 | */ | 1508 | */ |
1491 | if (cl) | 1509 | if (cl) |
1492 | cl->filter_cnt++; | 1510 | cl->filter_cnt++; |
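
In sch_htb.c most of the churn is reflowing multi-line comments into the usual kernel style (a leading " * " on every continuation line, closing "*/" on its own line) plus the same printk-to-pr_err/pr_warning conversion. One tweak worth calling out is the HTB_DIRECT sentinel, which gains surrounding parentheses and a long constant. A hedged reading of why, with an illustrative stand-in type:

#include <stdio.h>

struct htb_class_demo { int level; };	/* stand-in, not the kernel struct */

/* before: #define HTB_DIRECT (struct htb_class*)-1
 * The new form parenthesises the macro body (routine macro hygiene) and casts
 * -1L so the all-ones value comes from a type as wide as a pointer on 64-bit
 * builds; comparisons against the sentinel behave the same either way.
 */
#define HTB_DIRECT ((struct htb_class_demo *)-1L)

int main(void)
{
	struct htb_class_demo *cl = HTB_DIRECT;

	if (cl == HTB_DIRECT)
		printf("packet goes to the direct queue\n");
	return 0;
}
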
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 21f13da24763..820f2a7ca14d 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch) | |||
156 | unsigned int len; | 156 | unsigned int len; |
157 | struct Qdisc *qdisc; | 157 | struct Qdisc *qdisc; |
158 | 158 | ||
159 | for (band = q->bands-1; band >= 0; band--) { | 159 | for (band = q->bands - 1; band >= 0; band--) { |
160 | qdisc = q->queues[band]; | 160 | qdisc = q->queues[band]; |
161 | if (qdisc->ops->drop) { | 161 | if (qdisc->ops->drop) { |
162 | len = qdisc->ops->drop(qdisc); | 162 | len = qdisc->ops->drop(qdisc); |
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) | |||
265 | for (i = 0; i < q->max_bands; i++) | 265 | for (i = 0; i < q->max_bands; i++) |
266 | q->queues[i] = &noop_qdisc; | 266 | q->queues[i] = &noop_qdisc; |
267 | 267 | ||
268 | err = multiq_tune(sch,opt); | 268 | err = multiq_tune(sch, opt); |
269 | 269 | ||
270 | if (err) | 270 | if (err) |
271 | kfree(q->queues); | 271 | kfree(q->queues); |
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | |||
346 | struct multiq_sched_data *q = qdisc_priv(sch); | 346 | struct multiq_sched_data *q = qdisc_priv(sch); |
347 | 347 | ||
348 | tcm->tcm_handle |= TC_H_MIN(cl); | 348 | tcm->tcm_handle |= TC_H_MIN(cl); |
349 | tcm->tcm_info = q->queues[cl-1]->handle; | 349 | tcm->tcm_info = q->queues[cl - 1]->handle; |
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
352 | 352 | ||
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
378 | arg->count++; | 378 | arg->count++; |
379 | continue; | 379 | continue; |
380 | } | 380 | } |
381 | if (arg->fn(sch, band+1, arg) < 0) { | 381 | if (arg->fn(sch, band + 1, arg) < 0) { |
382 | arg->stop = 1; | 382 | arg->stop = 1; |
383 | break; | 383 | break; |
384 | } | 384 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 1c4bce863479..c2bbbe60d544 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
211 | } | 211 | } |
212 | 212 | ||
213 | cb = netem_skb_cb(skb); | 213 | cb = netem_skb_cb(skb); |
214 | if (q->gap == 0 || /* not doing reordering */ | 214 | if (q->gap == 0 || /* not doing reordering */ |
215 | q->counter < q->gap || /* inside last reordering gap */ | 215 | q->counter < q->gap || /* inside last reordering gap */ |
216 | q->reorder < get_crandom(&q->reorder_cor)) { | 216 | q->reorder < get_crandom(&q->reorder_cor)) { |
217 | psched_time_t now; | 217 | psched_time_t now; |
218 | psched_tdiff_t delay; | 218 | psched_tdiff_t delay; |
@@ -249,7 +249,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
249 | return ret; | 249 | return ret; |
250 | } | 250 | } |
251 | 251 | ||
252 | static unsigned int netem_drop(struct Qdisc* sch) | 252 | static unsigned int netem_drop(struct Qdisc *sch) |
253 | { | 253 | { |
254 | struct netem_sched_data *q = qdisc_priv(sch); | 254 | struct netem_sched_data *q = qdisc_priv(sch); |
255 | unsigned int len = 0; | 255 | unsigned int len = 0; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 966158d49dd1..3bea31e101b5 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -22,8 +22,7 @@ | |||
22 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
23 | 23 | ||
24 | 24 | ||
25 | struct prio_sched_data | 25 | struct prio_sched_data { |
26 | { | ||
27 | int bands; | 26 | int bands; |
28 | struct tcf_proto *filter_list; | 27 | struct tcf_proto *filter_list; |
29 | u8 prio2band[TC_PRIO_MAX+1]; | 28 | u8 prio2band[TC_PRIO_MAX+1]; |
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
54 | if (!q->filter_list || err < 0) { | 53 | if (!q->filter_list || err < 0) { |
55 | if (TC_H_MAJ(band)) | 54 | if (TC_H_MAJ(band)) |
56 | band = 0; | 55 | band = 0; |
57 | return q->queues[q->prio2band[band&TC_PRIO_MAX]]; | 56 | return q->queues[q->prio2band[band & TC_PRIO_MAX]]; |
58 | } | 57 | } |
59 | band = res.classid; | 58 | band = res.classid; |
60 | } | 59 | } |
@@ -107,7 +106,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch) | |||
107 | return NULL; | 106 | return NULL; |
108 | } | 107 | } |
109 | 108 | ||
110 | static struct sk_buff *prio_dequeue(struct Qdisc* sch) | 109 | static struct sk_buff *prio_dequeue(struct Qdisc *sch) |
111 | { | 110 | { |
112 | struct prio_sched_data *q = qdisc_priv(sch); | 111 | struct prio_sched_data *q = qdisc_priv(sch); |
113 | int prio; | 112 | int prio; |
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch) | |||
124 | 123 | ||
125 | } | 124 | } |
126 | 125 | ||
127 | static unsigned int prio_drop(struct Qdisc* sch) | 126 | static unsigned int prio_drop(struct Qdisc *sch) |
128 | { | 127 | { |
129 | struct prio_sched_data *q = qdisc_priv(sch); | 128 | struct prio_sched_data *q = qdisc_priv(sch); |
130 | int prio; | 129 | int prio; |
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch) | |||
143 | 142 | ||
144 | 143 | ||
145 | static void | 144 | static void |
146 | prio_reset(struct Qdisc* sch) | 145 | prio_reset(struct Qdisc *sch) |
147 | { | 146 | { |
148 | int prio; | 147 | int prio; |
149 | struct prio_sched_data *q = qdisc_priv(sch); | 148 | struct prio_sched_data *q = qdisc_priv(sch); |
150 | 149 | ||
151 | for (prio=0; prio<q->bands; prio++) | 150 | for (prio = 0; prio < q->bands; prio++) |
152 | qdisc_reset(q->queues[prio]); | 151 | qdisc_reset(q->queues[prio]); |
153 | sch->q.qlen = 0; | 152 | sch->q.qlen = 0; |
154 | } | 153 | } |
155 | 154 | ||
156 | static void | 155 | static void |
157 | prio_destroy(struct Qdisc* sch) | 156 | prio_destroy(struct Qdisc *sch) |
158 | { | 157 | { |
159 | int prio; | 158 | int prio; |
160 | struct prio_sched_data *q = qdisc_priv(sch); | 159 | struct prio_sched_data *q = qdisc_priv(sch); |
161 | 160 | ||
162 | tcf_destroy_chain(&q->filter_list); | 161 | tcf_destroy_chain(&q->filter_list); |
163 | for (prio=0; prio<q->bands; prio++) | 162 | for (prio = 0; prio < q->bands; prio++) |
164 | qdisc_destroy(q->queues[prio]); | 163 | qdisc_destroy(q->queues[prio]); |
165 | } | 164 | } |
166 | 165 | ||
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
177 | if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) | 176 | if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) |
178 | return -EINVAL; | 177 | return -EINVAL; |
179 | 178 | ||
180 | for (i=0; i<=TC_PRIO_MAX; i++) { | 179 | for (i = 0; i <= TC_PRIO_MAX; i++) { |
181 | if (qopt->priomap[i] >= qopt->bands) | 180 | if (qopt->priomap[i] >= qopt->bands) |
182 | return -EINVAL; | 181 | return -EINVAL; |
183 | } | 182 | } |
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
186 | q->bands = qopt->bands; | 185 | q->bands = qopt->bands; |
187 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); | 186 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
188 | 187 | ||
189 | for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { | 188 | for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { |
190 | struct Qdisc *child = q->queues[i]; | 189 | struct Qdisc *child = q->queues[i]; |
191 | q->queues[i] = &noop_qdisc; | 190 | q->queues[i] = &noop_qdisc; |
192 | if (child != &noop_qdisc) { | 191 | if (child != &noop_qdisc) { |
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
196 | } | 195 | } |
197 | sch_tree_unlock(sch); | 196 | sch_tree_unlock(sch); |
198 | 197 | ||
199 | for (i=0; i<q->bands; i++) { | 198 | for (i = 0; i < q->bands; i++) { |
200 | if (q->queues[i] == &noop_qdisc) { | 199 | if (q->queues[i] == &noop_qdisc) { |
201 | struct Qdisc *child, *old; | 200 | struct Qdisc *child, *old; |
201 | |||
202 | child = qdisc_create_dflt(sch->dev_queue, | 202 | child = qdisc_create_dflt(sch->dev_queue, |
203 | &pfifo_qdisc_ops, | 203 | &pfifo_qdisc_ops, |
204 | TC_H_MAKE(sch->handle, i + 1)); | 204 | TC_H_MAKE(sch->handle, i + 1)); |
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) | |||
224 | struct prio_sched_data *q = qdisc_priv(sch); | 224 | struct prio_sched_data *q = qdisc_priv(sch); |
225 | int i; | 225 | int i; |
226 | 226 | ||
227 | for (i=0; i<TCQ_PRIO_BANDS; i++) | 227 | for (i = 0; i < TCQ_PRIO_BANDS; i++) |
228 | q->queues[i] = &noop_qdisc; | 228 | q->queues[i] = &noop_qdisc; |
229 | 229 | ||
230 | if (opt == NULL) { | 230 | if (opt == NULL) { |
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) | |||
232 | } else { | 232 | } else { |
233 | int err; | 233 | int err; |
234 | 234 | ||
235 | if ((err= prio_tune(sch, opt)) != 0) | 235 | if ((err = prio_tune(sch, opt)) != 0) |
236 | return err; | 236 | return err; |
237 | } | 237 | } |
238 | return 0; | 238 | return 0; |
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
245 | struct tc_prio_qopt opt; | 245 | struct tc_prio_qopt opt; |
246 | 246 | ||
247 | opt.bands = q->bands; | 247 | opt.bands = q->bands; |
248 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); | 248 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); |
249 | 249 | ||
250 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 250 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
251 | 251 | ||
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
342 | arg->count++; | 342 | arg->count++; |
343 | continue; | 343 | continue; |
344 | } | 344 | } |
345 | if (arg->fn(sch, prio+1, arg) < 0) { | 345 | if (arg->fn(sch, prio + 1, arg) < 0) { |
346 | arg->stop = 1; | 346 | arg->stop = 1; |
347 | break; | 347 | break; |
348 | } | 348 | } |
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
350 | } | 350 | } |
351 | } | 351 | } |
352 | 352 | ||
353 | static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl) | 353 | static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) |
354 | { | 354 | { |
355 | struct prio_sched_data *q = qdisc_priv(sch); | 355 | struct prio_sched_data *q = qdisc_priv(sch); |
356 | 356 | ||
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a6009c5a2c97..689157555fa4 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -36,8 +36,7 @@ | |||
36 | if RED works correctly. | 36 | if RED works correctly. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | struct red_sched_data | 39 | struct red_sched_data { |
40 | { | ||
41 | u32 limit; /* HARD maximal queue length */ | 40 | u32 limit; /* HARD maximal queue length */ |
42 | unsigned char flags; | 41 | unsigned char flags; |
43 | struct red_parms parms; | 42 | struct red_parms parms; |
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q) | |||
55 | return q->flags & TC_RED_HARDDROP; | 54 | return q->flags & TC_RED_HARDDROP; |
56 | } | 55 | } |
57 | 56 | ||
58 | static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 57 | static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
59 | { | 58 | { |
60 | struct red_sched_data *q = qdisc_priv(sch); | 59 | struct red_sched_data *q = qdisc_priv(sch); |
61 | struct Qdisc *child = q->qdisc; | 60 | struct Qdisc *child = q->qdisc; |
@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
67 | red_end_of_idle_period(&q->parms); | 66 | red_end_of_idle_period(&q->parms); |
68 | 67 | ||
69 | switch (red_action(&q->parms, q->parms.qavg)) { | 68 | switch (red_action(&q->parms, q->parms.qavg)) { |
70 | case RED_DONT_MARK: | 69 | case RED_DONT_MARK: |
71 | break; | 70 | break; |
72 | 71 | ||
73 | case RED_PROB_MARK: | 72 | case RED_PROB_MARK: |
74 | sch->qstats.overlimits++; | 73 | sch->qstats.overlimits++; |
75 | if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { | 74 | if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { |
76 | q->stats.prob_drop++; | 75 | q->stats.prob_drop++; |
77 | goto congestion_drop; | 76 | goto congestion_drop; |
78 | } | 77 | } |
79 | 78 | ||
80 | q->stats.prob_mark++; | 79 | q->stats.prob_mark++; |
81 | break; | 80 | break; |
82 | 81 | ||
83 | case RED_HARD_MARK: | 82 | case RED_HARD_MARK: |
84 | sch->qstats.overlimits++; | 83 | sch->qstats.overlimits++; |
85 | if (red_use_harddrop(q) || !red_use_ecn(q) || | 84 | if (red_use_harddrop(q) || !red_use_ecn(q) || |
86 | !INET_ECN_set_ce(skb)) { | 85 | !INET_ECN_set_ce(skb)) { |
87 | q->stats.forced_drop++; | 86 | q->stats.forced_drop++; |
88 | goto congestion_drop; | 87 | goto congestion_drop; |
89 | } | 88 | } |
90 | 89 | ||
91 | q->stats.forced_mark++; | 90 | q->stats.forced_mark++; |
92 | break; | 91 | break; |
93 | } | 92 | } |
94 | 93 | ||
95 | ret = qdisc_enqueue(skb, child); | 94 | ret = qdisc_enqueue(skb, child); |
@@ -107,7 +106,7 @@ congestion_drop: | |||
107 | return NET_XMIT_CN; | 106 | return NET_XMIT_CN; |
108 | } | 107 | } |
109 | 108 | ||
110 | static struct sk_buff * red_dequeue(struct Qdisc* sch) | 109 | static struct sk_buff *red_dequeue(struct Qdisc *sch) |
111 | { | 110 | { |
112 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
113 | struct red_sched_data *q = qdisc_priv(sch); | 112 | struct red_sched_data *q = qdisc_priv(sch); |
@@ -122,7 +121,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch) | |||
122 | return skb; | 121 | return skb; |
123 | } | 122 | } |
124 | 123 | ||
125 | static struct sk_buff * red_peek(struct Qdisc* sch) | 124 | static struct sk_buff *red_peek(struct Qdisc *sch) |
126 | { | 125 | { |
127 | struct red_sched_data *q = qdisc_priv(sch); | 126 | struct red_sched_data *q = qdisc_priv(sch); |
128 | struct Qdisc *child = q->qdisc; | 127 | struct Qdisc *child = q->qdisc; |
@@ -130,7 +129,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch) | |||
130 | return child->ops->peek(child); | 129 | return child->ops->peek(child); |
131 | } | 130 | } |
132 | 131 | ||
133 | static unsigned int red_drop(struct Qdisc* sch) | 132 | static unsigned int red_drop(struct Qdisc *sch) |
134 | { | 133 | { |
135 | struct red_sched_data *q = qdisc_priv(sch); | 134 | struct red_sched_data *q = qdisc_priv(sch); |
136 | struct Qdisc *child = q->qdisc; | 135 | struct Qdisc *child = q->qdisc; |
@@ -149,7 +148,7 @@ static unsigned int red_drop(struct Qdisc* sch) | |||
149 | return 0; | 148 | return 0; |
150 | } | 149 | } |
151 | 150 | ||
152 | static void red_reset(struct Qdisc* sch) | 151 | static void red_reset(struct Qdisc *sch) |
153 | { | 152 | { |
154 | struct red_sched_data *q = qdisc_priv(sch); | 153 | struct red_sched_data *q = qdisc_priv(sch); |
155 | 154 | ||
@@ -216,7 +215,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) | |||
216 | return 0; | 215 | return 0; |
217 | } | 216 | } |
218 | 217 | ||
219 | static int red_init(struct Qdisc* sch, struct nlattr *opt) | 218 | static int red_init(struct Qdisc *sch, struct nlattr *opt) |
220 | { | 219 | { |
221 | struct red_sched_data *q = qdisc_priv(sch); | 220 | struct red_sched_data *q = qdisc_priv(sch); |
222 | 221 | ||
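Two patterns recur in the sch_red.c hunks: the opening brace of a struct definition moves onto the declaration line, and case labels are indented at the same level as the switch keyword. A small compilable sketch of both conventions; the type, enum, and values here are invented for illustration and are not taken from sch_red.c.

/* style_demo_red.c - struct brace placement and switch/case indentation */
#include <stdio.h>

/* opening brace on the same line as the struct keyword */
struct demo_sched_data {
	unsigned int limit;
	unsigned char flags;
};

enum mark_action { DONT_MARK, PROB_MARK, HARD_MARK };

static const char *describe(enum mark_action act)
{
	/* case labels share the indentation of the switch statement */
	switch (act) {
	case DONT_MARK:
		return "pass";
	case PROB_MARK:
		return "probabilistic mark";
	case HARD_MARK:
		return "forced mark";
	}
	return "unknown";
}

int main(void)
{
	struct demo_sched_data q = { .limit = 1000, .flags = 0 };

	printf("%u %s\n", q.limit, describe(PROB_MARK));
	return 0;
}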
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 239ec53a634d..54a36f43a1f1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -92,8 +92,7 @@ typedef unsigned char sfq_index; | |||
92 | * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] | 92 | * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] |
93 | * are 'pointers' to dep[] array | 93 | * are 'pointers' to dep[] array |
94 | */ | 94 | */ |
95 | struct sfq_head | 95 | struct sfq_head { |
96 | { | ||
97 | sfq_index next; | 96 | sfq_index next; |
98 | sfq_index prev; | 97 | sfq_index prev; |
99 | }; | 98 | }; |
@@ -108,11 +107,10 @@ struct sfq_slot { | |||
108 | short allot; /* credit for this slot */ | 107 | short allot; /* credit for this slot */ |
109 | }; | 108 | }; |
110 | 109 | ||
111 | struct sfq_sched_data | 110 | struct sfq_sched_data { |
112 | { | ||
113 | /* Parameters */ | 111 | /* Parameters */ |
114 | int perturb_period; | 112 | int perturb_period; |
115 | unsigned quantum; /* Allotment per round: MUST BE >= MTU */ | 113 | unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ |
116 | int limit; | 114 | int limit; |
117 | 115 | ||
118 | /* Variables */ | 116 | /* Variables */ |
@@ -137,12 +135,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index | |||
137 | return &q->dep[val - SFQ_SLOTS]; | 135 | return &q->dep[val - SFQ_SLOTS]; |
138 | } | 136 | } |
139 | 137 | ||
140 | static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) | 138 | static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) |
141 | { | 139 | { |
142 | return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); | 140 | return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); |
143 | } | 141 | } |
144 | 142 | ||
145 | static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | 143 | static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) |
146 | { | 144 | { |
147 | u32 h, h2; | 145 | u32 h, h2; |
148 | 146 | ||
@@ -157,13 +155,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
157 | iph = ip_hdr(skb); | 155 | iph = ip_hdr(skb); |
158 | h = (__force u32)iph->daddr; | 156 | h = (__force u32)iph->daddr; |
159 | h2 = (__force u32)iph->saddr ^ iph->protocol; | 157 | h2 = (__force u32)iph->saddr ^ iph->protocol; |
160 | if (iph->frag_off & htons(IP_MF|IP_OFFSET)) | 158 | if (iph->frag_off & htons(IP_MF | IP_OFFSET)) |
161 | break; | 159 | break; |
162 | poff = proto_ports_offset(iph->protocol); | 160 | poff = proto_ports_offset(iph->protocol); |
163 | if (poff >= 0 && | 161 | if (poff >= 0 && |
164 | pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { | 162 | pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { |
165 | iph = ip_hdr(skb); | 163 | iph = ip_hdr(skb); |
166 | h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff); | 164 | h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff); |
167 | } | 165 | } |
168 | break; | 166 | break; |
169 | } | 167 | } |
@@ -181,7 +179,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
181 | if (poff >= 0 && | 179 | if (poff >= 0 && |
182 | pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { | 180 | pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { |
183 | iph = ipv6_hdr(skb); | 181 | iph = ipv6_hdr(skb); |
184 | h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff); | 182 | h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff); |
185 | } | 183 | } |
186 | break; | 184 | break; |
187 | } | 185 | } |
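The sch_sfq.c changes drop __inline__ from small static helpers (the compiler inlines them on its own), spell bare "unsigned" as "unsigned int", and write pointer casts with the space before the star, as in (u32 *)p. The sketch below mirrors that style; fold_hash() and its multiplicative mix are stand-ins chosen so the example builds outside the kernel, whereas the real patch keeps the jhash_2words() call unchanged.

/* style_demo_sfq.c - plain static helpers, unsigned int, cast spacing */
#include <stdint.h>
#include <stdio.h>

#define HASH_DIVISOR 1024

/* plain "static": no __inline__, inlining is left to the compiler */
static unsigned int fold_hash(uint32_t h, uint32_t h1, uint32_t perturbation)
{
	/* stand-in mix; not the jhash_2words() used by sch_sfq.c */
	uint32_t mixed = (h * 2654435761u) ^ (h1 + perturbation);

	return mixed & (HASH_DIVISOR - 1);
}

int main(void)
{
	uint32_t words[2] = { 0x01020304u, 0x05060708u };
	void *payload = words;
	/* space before '*' inside the cast, mirroring (u32 *) in the patch */
	uint32_t first = *(uint32_t *)payload;

	printf("%u\n", fold_hash(first, 42, 7));
	return 0;
}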
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 77565e721811..475edfb69c22 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -97,8 +97,7 @@ | |||
97 | changed the limit is not effective anymore. | 97 | changed the limit is not effective anymore. |
98 | */ | 98 | */ |
99 | 99 | ||
100 | struct tbf_sched_data | 100 | struct tbf_sched_data { |
101 | { | ||
102 | /* Parameters */ | 101 | /* Parameters */ |
103 | u32 limit; /* Maximal length of backlog: bytes */ | 102 | u32 limit; /* Maximal length of backlog: bytes */ |
104 | u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ | 103 | u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ |
@@ -115,10 +114,10 @@ struct tbf_sched_data | |||
115 | struct qdisc_watchdog watchdog; /* Watchdog timer */ | 114 | struct qdisc_watchdog watchdog; /* Watchdog timer */ |
116 | }; | 115 | }; |
117 | 116 | ||
118 | #define L2T(q,L) qdisc_l2t((q)->R_tab,L) | 117 | #define L2T(q, L) qdisc_l2t((q)->R_tab, L) |
119 | #define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) | 118 | #define L2T_P(q, L) qdisc_l2t((q)->P_tab, L) |
120 | 119 | ||
121 | static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 120 | static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
122 | { | 121 | { |
123 | struct tbf_sched_data *q = qdisc_priv(sch); | 122 | struct tbf_sched_data *q = qdisc_priv(sch); |
124 | int ret; | 123 | int ret; |
@@ -138,7 +137,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
138 | return NET_XMIT_SUCCESS; | 137 | return NET_XMIT_SUCCESS; |
139 | } | 138 | } |
140 | 139 | ||
141 | static unsigned int tbf_drop(struct Qdisc* sch) | 140 | static unsigned int tbf_drop(struct Qdisc *sch) |
142 | { | 141 | { |
143 | struct tbf_sched_data *q = qdisc_priv(sch); | 142 | struct tbf_sched_data *q = qdisc_priv(sch); |
144 | unsigned int len = 0; | 143 | unsigned int len = 0; |
@@ -150,7 +149,7 @@ static unsigned int tbf_drop(struct Qdisc* sch) | |||
150 | return len; | 149 | return len; |
151 | } | 150 | } |
152 | 151 | ||
153 | static struct sk_buff *tbf_dequeue(struct Qdisc* sch) | 152 | static struct sk_buff *tbf_dequeue(struct Qdisc *sch) |
154 | { | 153 | { |
155 | struct tbf_sched_data *q = qdisc_priv(sch); | 154 | struct tbf_sched_data *q = qdisc_priv(sch); |
156 | struct sk_buff *skb; | 155 | struct sk_buff *skb; |
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) | |||
209 | return NULL; | 208 | return NULL; |
210 | } | 209 | } |
211 | 210 | ||
212 | static void tbf_reset(struct Qdisc* sch) | 211 | static void tbf_reset(struct Qdisc *sch) |
213 | { | 212 | { |
214 | struct tbf_sched_data *q = qdisc_priv(sch); | 213 | struct tbf_sched_data *q = qdisc_priv(sch); |
215 | 214 | ||
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { | |||
227 | [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, | 226 | [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, |
228 | }; | 227 | }; |
229 | 228 | ||
230 | static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | 229 | static int tbf_change(struct Qdisc *sch, struct nlattr *opt) |
231 | { | 230 | { |
232 | int err; | 231 | int err; |
233 | struct tbf_sched_data *q = qdisc_priv(sch); | 232 | struct tbf_sched_data *q = qdisc_priv(sch); |
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | |||
236 | struct qdisc_rate_table *rtab = NULL; | 235 | struct qdisc_rate_table *rtab = NULL; |
237 | struct qdisc_rate_table *ptab = NULL; | 236 | struct qdisc_rate_table *ptab = NULL; |
238 | struct Qdisc *child = NULL; | 237 | struct Qdisc *child = NULL; |
239 | int max_size,n; | 238 | int max_size, n; |
240 | 239 | ||
241 | err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); | 240 | err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); |
242 | if (err < 0) | 241 | if (err < 0) |
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | |||
259 | } | 258 | } |
260 | 259 | ||
261 | for (n = 0; n < 256; n++) | 260 | for (n = 0; n < 256; n++) |
262 | if (rtab->data[n] > qopt->buffer) break; | 261 | if (rtab->data[n] > qopt->buffer) |
263 | max_size = (n << qopt->rate.cell_log)-1; | 262 | break; |
263 | max_size = (n << qopt->rate.cell_log) - 1; | ||
264 | if (ptab) { | 264 | if (ptab) { |
265 | int size; | 265 | int size; |
266 | 266 | ||
267 | for (n = 0; n < 256; n++) | 267 | for (n = 0; n < 256; n++) |
268 | if (ptab->data[n] > qopt->mtu) break; | 268 | if (ptab->data[n] > qopt->mtu) |
269 | size = (n << qopt->peakrate.cell_log)-1; | 269 | break; |
270 | if (size < max_size) max_size = size; | 270 | size = (n << qopt->peakrate.cell_log) - 1; |
271 | if (size < max_size) | ||
272 | max_size = size; | ||
271 | } | 273 | } |
272 | if (max_size < 0) | 274 | if (max_size < 0) |
273 | goto done; | 275 | goto done; |
@@ -310,7 +312,7 @@ done: | |||
310 | return err; | 312 | return err; |
311 | } | 313 | } |
312 | 314 | ||
313 | static int tbf_init(struct Qdisc* sch, struct nlattr *opt) | 315 | static int tbf_init(struct Qdisc *sch, struct nlattr *opt) |
314 | { | 316 | { |
315 | struct tbf_sched_data *q = qdisc_priv(sch); | 317 | struct tbf_sched_data *q = qdisc_priv(sch); |
316 | 318 | ||
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
422 | } | 424 | } |
423 | } | 425 | } |
424 | 426 | ||
425 | static const struct Qdisc_class_ops tbf_class_ops = | 427 | static const struct Qdisc_class_ops tbf_class_ops = { |
426 | { | ||
427 | .graft = tbf_graft, | 428 | .graft = tbf_graft, |
428 | .leaf = tbf_leaf, | 429 | .leaf = tbf_leaf, |
429 | .get = tbf_get, | 430 | .get = tbf_get, |
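In sch_tbf.c the cleanup splits one-liners such as "if (rtab->data[n] > qopt->buffer) break;" into a condition line and a break line, and moves the opening brace of the static ops initializer up to the declaration. The following stand-alone sketch lays out the max_size scan in the same one-statement-per-line style; the cost table, budget, and cell_log values are made up for the example.

/* style_demo_tbf.c - one statement per line in the rate-table scan */
#include <stdio.h>

#define TABLE_SIZE 256

static int scan_max_size(const unsigned int *data, unsigned int budget,
			 int cell_log)
{
	int n;

	/* the condition and the break each get their own line */
	for (n = 0; n < TABLE_SIZE; n++)
		if (data[n] > budget)
			break;
	return (n << cell_log) - 1;
}

int main(void)
{
	unsigned int rtab[TABLE_SIZE];
	int n;

	for (n = 0; n < TABLE_SIZE; n++)
		rtab[n] = n * 10;	/* invented per-size cost table */

	printf("max_size = %d\n", scan_max_size(rtab, 1000, 3));
	return 0;
}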
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 84ce48eadff4..64c071ded0f4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -53,8 +53,7 @@ | |||
53 | which will not break load balancing, though native slave | 53 | which will not break load balancing, though native slave |
54 | traffic will have the highest priority. */ | 54 | traffic will have the highest priority. */ |
55 | 55 | ||
56 | struct teql_master | 56 | struct teql_master { |
57 | { | ||
58 | struct Qdisc_ops qops; | 57 | struct Qdisc_ops qops; |
59 | struct net_device *dev; | 58 | struct net_device *dev; |
60 | struct Qdisc *slaves; | 59 | struct Qdisc *slaves; |
@@ -65,22 +64,21 @@ struct teql_master | |||
65 | unsigned long tx_dropped; | 64 | unsigned long tx_dropped; |
66 | }; | 65 | }; |
67 | 66 | ||
68 | struct teql_sched_data | 67 | struct teql_sched_data { |
69 | { | ||
70 | struct Qdisc *next; | 68 | struct Qdisc *next; |
71 | struct teql_master *m; | 69 | struct teql_master *m; |
72 | struct neighbour *ncache; | 70 | struct neighbour *ncache; |
73 | struct sk_buff_head q; | 71 | struct sk_buff_head q; |
74 | }; | 72 | }; |
75 | 73 | ||
76 | #define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next) | 74 | #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) |
77 | 75 | ||
78 | #define FMASK (IFF_BROADCAST|IFF_POINTOPOINT) | 76 | #define FMASK (IFF_BROADCAST | IFF_POINTOPOINT) |
79 | 77 | ||
80 | /* "teql*" qdisc routines */ | 78 | /* "teql*" qdisc routines */ |
81 | 79 | ||
82 | static int | 80 | static int |
83 | teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 81 | teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
84 | { | 82 | { |
85 | struct net_device *dev = qdisc_dev(sch); | 83 | struct net_device *dev = qdisc_dev(sch); |
86 | struct teql_sched_data *q = qdisc_priv(sch); | 84 | struct teql_sched_data *q = qdisc_priv(sch); |
@@ -97,7 +95,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
97 | } | 95 | } |
98 | 96 | ||
99 | static struct sk_buff * | 97 | static struct sk_buff * |
100 | teql_dequeue(struct Qdisc* sch) | 98 | teql_dequeue(struct Qdisc *sch) |
101 | { | 99 | { |
102 | struct teql_sched_data *dat = qdisc_priv(sch); | 100 | struct teql_sched_data *dat = qdisc_priv(sch); |
103 | struct netdev_queue *dat_queue; | 101 | struct netdev_queue *dat_queue; |
@@ -117,13 +115,13 @@ teql_dequeue(struct Qdisc* sch) | |||
117 | } | 115 | } |
118 | 116 | ||
119 | static struct sk_buff * | 117 | static struct sk_buff * |
120 | teql_peek(struct Qdisc* sch) | 118 | teql_peek(struct Qdisc *sch) |
121 | { | 119 | { |
122 | /* teql is meant to be used as root qdisc */ | 120 | /* teql is meant to be used as root qdisc */ |
123 | return NULL; | 121 | return NULL; |
124 | } | 122 | } |
125 | 123 | ||
126 | static __inline__ void | 124 | static inline void |
127 | teql_neigh_release(struct neighbour *n) | 125 | teql_neigh_release(struct neighbour *n) |
128 | { | 126 | { |
129 | if (n) | 127 | if (n) |
@@ -131,7 +129,7 @@ teql_neigh_release(struct neighbour *n) | |||
131 | } | 129 | } |
132 | 130 | ||
133 | static void | 131 | static void |
134 | teql_reset(struct Qdisc* sch) | 132 | teql_reset(struct Qdisc *sch) |
135 | { | 133 | { |
136 | struct teql_sched_data *dat = qdisc_priv(sch); | 134 | struct teql_sched_data *dat = qdisc_priv(sch); |
137 | 135 | ||
@@ -141,13 +139,14 @@ teql_reset(struct Qdisc* sch) | |||
141 | } | 139 | } |
142 | 140 | ||
143 | static void | 141 | static void |
144 | teql_destroy(struct Qdisc* sch) | 142 | teql_destroy(struct Qdisc *sch) |
145 | { | 143 | { |
146 | struct Qdisc *q, *prev; | 144 | struct Qdisc *q, *prev; |
147 | struct teql_sched_data *dat = qdisc_priv(sch); | 145 | struct teql_sched_data *dat = qdisc_priv(sch); |
148 | struct teql_master *master = dat->m; | 146 | struct teql_master *master = dat->m; |
149 | 147 | ||
150 | if ((prev = master->slaves) != NULL) { | 148 | prev = master->slaves; |
149 | if (prev) { | ||
151 | do { | 150 | do { |
152 | q = NEXT_SLAVE(prev); | 151 | q = NEXT_SLAVE(prev); |
153 | if (q == sch) { | 152 | if (q == sch) { |
@@ -179,7 +178,7 @@ teql_destroy(struct Qdisc* sch) | |||
179 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | 178 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) |
180 | { | 179 | { |
181 | struct net_device *dev = qdisc_dev(sch); | 180 | struct net_device *dev = qdisc_dev(sch); |
182 | struct teql_master *m = (struct teql_master*)sch->ops; | 181 | struct teql_master *m = (struct teql_master *)sch->ops; |
183 | struct teql_sched_data *q = qdisc_priv(sch); | 182 | struct teql_sched_data *q = qdisc_priv(sch); |
184 | 183 | ||
185 | if (dev->hard_header_len > m->dev->hard_header_len) | 184 | if (dev->hard_header_len > m->dev->hard_header_len) |
@@ -290,7 +289,8 @@ restart: | |||
290 | nores = 0; | 289 | nores = 0; |
291 | busy = 0; | 290 | busy = 0; |
292 | 291 | ||
293 | if ((q = start) == NULL) | 292 | q = start; |
293 | if (!q) | ||
294 | goto drop; | 294 | goto drop; |
295 | 295 | ||
296 | do { | 296 | do { |
@@ -355,10 +355,10 @@ drop: | |||
355 | 355 | ||
356 | static int teql_master_open(struct net_device *dev) | 356 | static int teql_master_open(struct net_device *dev) |
357 | { | 357 | { |
358 | struct Qdisc * q; | 358 | struct Qdisc *q; |
359 | struct teql_master *m = netdev_priv(dev); | 359 | struct teql_master *m = netdev_priv(dev); |
360 | int mtu = 0xFFFE; | 360 | int mtu = 0xFFFE; |
361 | unsigned flags = IFF_NOARP|IFF_MULTICAST; | 361 | unsigned int flags = IFF_NOARP | IFF_MULTICAST; |
362 | 362 | ||
363 | if (m->slaves == NULL) | 363 | if (m->slaves == NULL) |
364 | return -EUNATCH; | 364 | return -EUNATCH; |
@@ -426,7 +426,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
426 | do { | 426 | do { |
427 | if (new_mtu > qdisc_dev(q)->mtu) | 427 | if (new_mtu > qdisc_dev(q)->mtu) |
428 | return -EINVAL; | 428 | return -EINVAL; |
429 | } while ((q=NEXT_SLAVE(q)) != m->slaves); | 429 | } while ((q = NEXT_SLAVE(q)) != m->slaves); |
430 | } | 430 | } |
431 | 431 | ||
432 | dev->mtu = new_mtu; | 432 | dev->mtu = new_mtu; |
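The sch_teql.c hunks pull assignments out of if-conditions, turning "if ((prev = master->slaves) != NULL)" into a plain assignment followed by "if (prev)", while the assignment inside the do/while condition is kept and only gains spaces. A minimal circular-list walk in that style is sketched below; the slave node layout and ids are invented for the example and are not the teql structures.

/* style_demo_teql.c - assignment split out of the if, kept in the while */
#include <stdio.h>

struct slave {
	int id;
	struct slave *next;	/* circular list, like the teql slave ring */
};

static void walk_slaves(struct slave *slaves)
{
	struct slave *q;

	/* assignment first, then a plain truth test */
	q = slaves;
	if (!q)
		return;
	do {
		printf("slave %d\n", q->id);
		/* the loop condition keeps its assignment, just with spaces */
	} while ((q = q->next) != slaves);
}

int main(void)
{
	struct slave a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	a.next = &b;
	b.next = &c;
	c.next = &a;
	walk_slaves(&a);
	return 0;
}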