author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 10:55:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 10:55:01 -0500
commit     d7fc02c7bae7b1cf69269992cf880a43a350cdaa (patch)
tree       a43d56fa72913a1cc98a0bbebe054d08581b3a7c /net/sched
parent     ee1262dbc65ce0b6234a915d8432171e8d77f518 (diff)
parent     28b4d5cc17c20786848cdc07b7ea237a309776bb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1815 commits)
mac80211: fix reorder buffer release
iwmc3200wifi: Enable wimax core through module parameter
iwmc3200wifi: Add wifi-wimax coexistence mode as a module parameter
iwmc3200wifi: Coex table command does not expect a response
iwmc3200wifi: Update wiwi priority table
iwlwifi: driver version track kernel version
iwlwifi: indicate uCode type when fail dump error/event log
iwl3945: remove duplicated event logging code
b43: fix two warnings
ipw2100: fix rebooting hang with driver loaded
cfg80211: indent regulatory messages with spaces
iwmc3200wifi: fix NULL pointer dereference in pmkid update
mac80211: Fix TX status reporting for injected data frames
ath9k: enable 2GHz band only if the device supports it
airo: Fix integer overflow warning
rt2x00: Fix padding bug on L2PAD devices.
WE: Fix set events not propagated
b43legacy: avoid PPC fault during resume
b43: avoid PPC fault during resume
tcp: fix a timewait refcnt race
...
Fix up conflicts due to sysctl cleanups (dead sysctl_check code and
CTL_UNNUMBERED removed) in
kernel/sysctl_check.c
net/ipv4/sysctl_net_ipv4.c
net/ipv6/addrconf.c
net/sctp/sysctl.c
Diffstat (limited to 'net/sched')
 net/sched/act_api.c     |   7
 net/sched/act_mirred.c  | 107
 net/sched/act_skbedit.c |  17
 net/sched/cls_api.c     |   8
 net/sched/cls_flow.c    |   2
 net/sched/cls_rsvp.h    |  28
 net/sched/em_meta.c     |  13
 net/sched/sch_api.c     |  19
 net/sched/sch_cbq.c     |   2
 net/sched/sch_drr.c     |   2
 net/sched/sch_generic.c |  18
 net/sched/sch_hfsc.c    |   2
 net/sched/sch_htb.c     |   6
 net/sched/sch_netem.c   |  12
 net/sched/sch_teql.c    |  11
15 files changed, 138 insertions, 116 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2dfb3e7a040d..2a740035aa6b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -618,7 +618,8 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
         goto errout;
 
     if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
-        gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
+        gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
+                                 &h->tcf_rate_est) < 0 ||
         gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
         goto errout;
 
@@ -968,7 +969,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     u32 pid = skb ? NETLINK_CB(skb).pid : 0;
     int ret = 0, ovr = 0;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return -EINVAL;
 
     ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1051,7 +1052,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
     struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
     struct nlattr *kind = find_dump_kind(cb->nlh);
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return 0;
 
     if (kind == NULL) {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b9aaab4e0354..d329170243cb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -65,48 +65,53 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
     struct tc_mirred *parm;
     struct tcf_mirred *m;
     struct tcf_common *pc;
-    struct net_device *dev = NULL;
-    int ret = 0, err;
-    int ok_push = 0;
+    struct net_device *dev;
+    int ret, ok_push = 0;
 
     if (nla == NULL)
         return -EINVAL;
-
-    err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
-    if (err < 0)
-        return err;
-
+    ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
+    if (ret < 0)
+        return ret;
     if (tb[TCA_MIRRED_PARMS] == NULL)
         return -EINVAL;
     parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
+    switch (parm->eaction) {
+    case TCA_EGRESS_MIRROR:
+    case TCA_EGRESS_REDIR:
+        break;
+    default:
+        return -EINVAL;
+    }
     if (parm->ifindex) {
         dev = __dev_get_by_index(&init_net, parm->ifindex);
         if (dev == NULL)
             return -ENODEV;
         switch (dev->type) {
         case ARPHRD_TUNNEL:
         case ARPHRD_TUNNEL6:
         case ARPHRD_SIT:
         case ARPHRD_IPGRE:
         case ARPHRD_VOID:
         case ARPHRD_NONE:
             ok_push = 0;
             break;
         default:
             ok_push = 1;
             break;
         }
+    } else {
+        dev = NULL;
     }
 
     pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
     if (!pc) {
-        if (!parm->ifindex)
+        if (dev == NULL)
             return -EINVAL;
         pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
                              &mirred_idx_gen, &mirred_hash_info);
         if (IS_ERR(pc))
             return PTR_ERR(pc);
         ret = ACT_P_CREATED;
     } else {
         if (!ovr) {
@@ -119,12 +124,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
     spin_lock_bh(&m->tcf_lock);
     m->tcf_action = parm->action;
     m->tcfm_eaction = parm->eaction;
-    if (parm->ifindex) {
+    if (dev != NULL) {
         m->tcfm_ifindex = parm->ifindex;
         if (ret != ACT_P_CREATED)
             dev_put(m->tcfm_dev);
-        m->tcfm_dev = dev;
         dev_hold(dev);
+        m->tcfm_dev = dev;
         m->tcfm_ok_push = ok_push;
     }
     spin_unlock_bh(&m->tcf_lock);
@@ -148,57 +153,57 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 {
     struct tcf_mirred *m = a->priv;
     struct net_device *dev;
-    struct sk_buff *skb2 = NULL;
-    u32 at = G_TC_AT(skb->tc_verd);
+    struct sk_buff *skb2;
+    u32 at;
+    int retval, err = 1;
 
     spin_lock(&m->tcf_lock);
-
-    dev = m->tcfm_dev;
     m->tcf_tm.lastuse = jiffies;
 
-    if (!(dev->flags&IFF_UP) ) {
+    dev = m->tcfm_dev;
+    if (!(dev->flags & IFF_UP)) {
         if (net_ratelimit())
             printk("mirred to Houston: device %s is gone!\n",
                    dev->name);
-bad_mirred:
-        if (skb2 != NULL)
-            kfree_skb(skb2);
-        m->tcf_qstats.overlimits++;
-        m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        m->tcf_bstats.packets++;
-        spin_unlock(&m->tcf_lock);
-        /* should we be asking for packet to be dropped?
-         * may make sense for redirect case only
-         */
-        return TC_ACT_SHOT;
+        goto out;
     }
 
     skb2 = skb_act_clone(skb, GFP_ATOMIC);
     if (skb2 == NULL)
-        goto bad_mirred;
-    if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
-        m->tcfm_eaction != TCA_EGRESS_REDIR) {
-        if (net_ratelimit())
-            printk("tcf_mirred unknown action %d\n",
-                   m->tcfm_eaction);
-        goto bad_mirred;
-    }
+        goto out;
 
     m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
     m->tcf_bstats.packets++;
-    if (!(at & AT_EGRESS))
+    at = G_TC_AT(skb->tc_verd);
+    if (!(at & AT_EGRESS)) {
         if (m->tcfm_ok_push)
             skb_push(skb2, skb2->dev->hard_header_len);
+    }
 
     /* mirror is always swallowed */
     if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
         skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
 
     skb2->dev = dev;
-    skb2->iif = skb->dev->ifindex;
+    skb2->skb_iif = skb->dev->ifindex;
     dev_queue_xmit(skb2);
+    err = 0;
+
+out:
+    if (err) {
+        m->tcf_qstats.overlimits++;
+        m->tcf_bstats.bytes += qdisc_pkt_len(skb);
+        m->tcf_bstats.packets++;
+        /* should we be asking for packet to be dropped?
+         * may make sense for redirect case only
+         */
+        retval = TC_ACT_SHOT;
+    } else {
+        retval = m->tcf_action;
+    }
     spin_unlock(&m->tcf_lock);
-    return m->tcf_action;
+
+    return retval;
 }
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 4ab916b8074b..e9607fe55b58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -54,6 +54,8 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
     if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
         skb->dev->real_num_tx_queues > d->queue_mapping)
         skb_set_queue_mapping(skb, d->queue_mapping);
+    if (d->flags & SKBEDIT_F_MARK)
+        skb->mark = d->mark;
 
     spin_unlock(&d->tcf_lock);
     return d->tcf_action;
@@ -63,6 +65,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
     [TCA_SKBEDIT_PARMS]         = { .len = sizeof(struct tc_skbedit) },
     [TCA_SKBEDIT_PRIORITY]      = { .len = sizeof(u32) },
     [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) },
+    [TCA_SKBEDIT_MARK]          = { .len = sizeof(u32) },
 };
 
 static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
@@ -72,7 +75,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
     struct tc_skbedit *parm;
     struct tcf_skbedit *d;
     struct tcf_common *pc;
-    u32 flags = 0, *priority = NULL;
+    u32 flags = 0, *priority = NULL, *mark = NULL;
     u16 *queue_mapping = NULL;
     int ret = 0, err;
 
@@ -95,6 +98,12 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
         flags |= SKBEDIT_F_QUEUE_MAPPING;
         queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
     }
+
+    if (tb[TCA_SKBEDIT_MARK] != NULL) {
+        flags |= SKBEDIT_F_MARK;
+        mark = nla_data(tb[TCA_SKBEDIT_MARK]);
+    }
+
     if (!flags)
         return -EINVAL;
 
@@ -124,6 +133,9 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
         d->priority = *priority;
     if (flags & SKBEDIT_F_QUEUE_MAPPING)
         d->queue_mapping = *queue_mapping;
+    if (flags & SKBEDIT_F_MARK)
+        d->mark = *mark;
+
     d->tcf_action = parm->action;
 
     spin_unlock_bh(&d->tcf_lock);
@@ -161,6 +173,9 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
     if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
         NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
                 sizeof(d->queue_mapping), &d->queue_mapping);
+    if (d->flags & SKBEDIT_F_MARK)
+        NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+                &d->mark);
     t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
     t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
     t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7cf6c0fbc7a6..3725d8fa29db 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -137,7 +137,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     int err;
     int tp_created = 0;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return -EINVAL;
 
 replay:
@@ -404,6 +404,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
                          a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
 }
 
+/* called with RTNL */
 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
     struct net *net = sock_net(skb->sk);
@@ -417,12 +418,12 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
     const struct Qdisc_class_ops *cops;
     struct tcf_dump_args arg;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return 0;
 
     if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
         return skb->len;
-    if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+    if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
         return skb->len;
 
     if (!tcm->tcm_parent)
@@ -484,7 +485,6 @@ errout:
     if (cl)
         cops->put(q, cl);
 out:
-    dev_put(dev);
     return skb->len;
 }
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 9402a7fd3785..e054c62857e1 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -171,7 +171,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
 
 static u32 flow_get_iif(const struct sk_buff *skb)
 {
-    return skb->iif;
+    return skb->skb_iif;
 }
 
 static u32 flow_get_priority(const struct sk_buff *skb)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 7034ea4530e5..dd9414e44200 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -170,21 +170,23 @@ restart:
     for (s = sht[h1]; s; s = s->next) {
         if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
             protocol == s->protocol &&
-            !(s->dpi.mask & (*(u32*)(xprt+s->dpi.offset)^s->dpi.key))
+            !(s->dpi.mask &
+              (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
 #if RSVP_DST_LEN == 4
-            && dst[0] == s->dst[0]
-            && dst[1] == s->dst[1]
-            && dst[2] == s->dst[2]
+            dst[0] == s->dst[0] &&
+            dst[1] == s->dst[1] &&
+            dst[2] == s->dst[2] &&
 #endif
-            && tunnelid == s->tunnelid) {
+            tunnelid == s->tunnelid) {
 
             for (f = s->ht[h2]; f; f = f->next) {
                 if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
                     !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
 #if RSVP_DST_LEN == 4
-                    && src[0] == f->src[0]
-                    && src[1] == f->src[1]
-                    && src[2] == f->src[2]
+                    &&
+                    src[0] == f->src[0] &&
+                    src[1] == f->src[1] &&
+                    src[2] == f->src[2]
 #endif
                     ) {
                     *res = f->res;
@@ -493,13 +495,13 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
     for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
         if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
             pinfo && pinfo->protocol == s->protocol &&
-            memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0
+            memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
 #if RSVP_DST_LEN == 4
-            && dst[0] == s->dst[0]
-            && dst[1] == s->dst[1]
-            && dst[2] == s->dst[2]
+            dst[0] == s->dst[0] &&
+            dst[1] == s->dst[1] &&
+            dst[2] == s->dst[2] &&
 #endif
-            && pinfo->tunnelid == s->tunnelid) {
+            pinfo->tunnelid == s->tunnelid) {
 
 insert:
             /* OK, we found appropriate session */
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 18d85d259104..24dce8b648a4 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -303,17 +303,18 @@ META_COLLECTOR(var_sk_bound_if)
 {
     SKIP_NONLOCAL(skb);
 
     if (skb->sk->sk_bound_dev_if == 0) {
         dst->value = (unsigned long) "any";
         dst->len = 3;
     } else {
         struct net_device *dev;
 
-        dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if);
+        rcu_read_lock();
+        dev = dev_get_by_index_rcu(sock_net(skb->sk),
+                                   skb->sk->sk_bound_dev_if);
         *err = var_dev(dev, dst);
-        if (dev)
-            dev_put(dev);
-    }
+        rcu_read_unlock();
+    }
 }
 
 META_COLLECTOR(int_sk_refcnt)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 903e4188b6ca..75fd1c672c61 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -947,7 +947,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     struct Qdisc *p = NULL;
     int err;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return -EINVAL;
 
     if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1009,7 +1009,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     struct Qdisc *q, *p;
     int err;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return -EINVAL;
 
 replay:
@@ -1179,7 +1179,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
         goto nla_put_failure;
 
     if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
-        gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
+        gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
         gnet_stats_copy_queue(&d, &q->qstats) < 0)
         goto nla_put_failure;
 
@@ -1274,14 +1274,15 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
     int s_idx, s_q_idx;
     struct net_device *dev;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return 0;
 
     s_idx = cb->args[0];
     s_q_idx = q_idx = cb->args[1];
-    read_lock(&dev_base_lock);
+
+    rcu_read_lock();
     idx = 0;
-    for_each_netdev(&init_net, dev) {
+    for_each_netdev_rcu(&init_net, dev) {
         struct netdev_queue *dev_queue;
 
         if (idx < s_idx)
@@ -1302,7 +1303,7 @@ cont:
     }
 
 done:
-    read_unlock(&dev_base_lock);
+    rcu_read_unlock();
 
     cb->args[0] = idx;
     cb->args[1] = q_idx;
@@ -1333,7 +1334,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     u32 qid = TC_H_MAJ(clid);
     int err;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return -EINVAL;
 
     if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1575,7 +1576,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
     struct net_device *dev;
     int t, s_t;
 
-    if (net != &init_net)
+    if (!net_eq(net, &init_net))
         return 0;
 
     if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 5b132c473264..3846d65bc03e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1609,7 +1609,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     cl->xstats.undertime = cl->undertime - q->now;
 
     if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-        gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+        gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, &cl->qstats) < 0)
         return -1;
 
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 5a888af7e5da..a65604f8f2b8 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -280,7 +280,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     }
 
     if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-        gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+        gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
         return -1;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ae6aa562f2b..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,32 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
     spin_unlock(root_lock);
 
     HARD_TX_LOCK(dev, txq, smp_processor_id());
-    if (!netif_tx_queue_stopped(txq) &&
-        !netif_tx_queue_frozen(txq))
+    if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
         ret = dev_hard_start_xmit(skb, dev, txq);
+
     HARD_TX_UNLOCK(dev, txq);
 
     spin_lock(root_lock);
 
-    switch (ret) {
-    case NETDEV_TX_OK:
-        /* Driver sent out skb successfully */
+    if (dev_xmit_complete(ret)) {
+        /* Driver sent out skb successfully or skb was consumed */
         ret = qdisc_qlen(q);
-        break;
-
-    case NETDEV_TX_LOCKED:
+    } else if (ret == NETDEV_TX_LOCKED) {
         /* Driver try lock failed */
         ret = handle_dev_cpu_collision(skb, txq, q);
-        break;
-
-    default:
+    } else {
         /* Driver returned NETDEV_TX_BUSY - requeue skb */
         if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
             printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                    dev->name, ret, q->q.qlen);
 
         ret = dev_requeue_skb(skb, q);
-        break;
     }
 
     if (ret && (netif_tx_queue_stopped(txq) ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2c5c76be18f8..b38b39c60752 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1375,7 +1375,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     xstats.rtwork = cl->cl_cumul;
 
     if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-        gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+        gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, &cl->qstats) < 0)
         return -1;
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 85acab9dc6fd..508cf5f3a6d5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1105,7 +1105,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
     cl->xstats.ctokens = cl->ctokens;
 
     if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-        gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+        gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, &cl->qstats) < 0)
         return -1;
 
@@ -1344,8 +1344,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
     };
 
     /* check for valid classid */
-    if (!classid || TC_H_MAJ(classid ^ sch->handle)
-        || htb_find(classid, sch))
+    if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
+        htb_find(classid, sch))
         goto failure;
 
     /* check maximal depth */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2b88295cb7b7..d8b10e054627 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -199,9 +199,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
      * do it now in software before we mangle it.
      */
     if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
-        if (!(skb = skb_unshare(skb, GFP_ATOMIC))
-            || (skb->ip_summed == CHECKSUM_PARTIAL
-                && skb_checksum_help(skb))) {
+        if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+            (skb->ip_summed == CHECKSUM_PARTIAL &&
+             skb_checksum_help(skb))) {
             sch->qstats.drops++;
             return NET_XMIT_DROP;
         }
@@ -210,9 +210,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     }
 
     cb = netem_skb_cb(skb);
-    if (q->gap == 0 /* not doing reordering */
-        || q->counter < q->gap /* inside last reordering gap */
-        || q->reorder < get_crandom(&q->reorder_cor)) {
+    if (q->gap == 0 || /* not doing reordering */
+        q->counter < q->gap || /* inside last reordering gap */
+        q->reorder < get_crandom(&q->reorder_cor)) {
         psched_time_t now;
         psched_tdiff_t delay;
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 5a002c247231..db69637069c4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -190,10 +190,13 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
     if (m->slaves) {
         if (m->dev->flags & IFF_UP) {
-            if ((m->dev->flags&IFF_POINTOPOINT && !(dev->flags&IFF_POINTOPOINT))
-                || (m->dev->flags&IFF_BROADCAST && !(dev->flags&IFF_BROADCAST))
-                || (m->dev->flags&IFF_MULTICAST && !(dev->flags&IFF_MULTICAST))
-                || dev->mtu < m->dev->mtu)
+            if ((m->dev->flags & IFF_POINTOPOINT &&
+                 !(dev->flags & IFF_POINTOPOINT)) ||
+                (m->dev->flags & IFF_BROADCAST &&
+                 !(dev->flags & IFF_BROADCAST)) ||
+                (m->dev->flags & IFF_MULTICAST &&
+                 !(dev->flags & IFF_MULTICAST)) ||
+                dev->mtu < m->dev->mtu)
                 return -EINVAL;
         } else {
             if (!(dev->flags&IFF_POINTOPOINT))