Diffstat (limited to 'net')
 net/ax25/sysctl_net_ax25.c | 14
 net/bridge/br_netfilter.c  |  2
 net/bridge/br_stp.c        | 25
 net/core/dev.c             | 32
 net/core/neighbour.c       | 13
 net/core/pktgen.c          | 10
 net/ipv4/sysctl_net_ipv4.c |  1
 net/ipv6/ip6_output.c      |  2
 net/ipv6/ipv6_sockglue.c   |  2
 net/ipv6/syncookies.c      | 22
 net/mac80211/ieee80211_i.h |  2
 net/mac80211/main.c        |  5
 net/mac80211/mlme.c        | 39
 net/mac80211/tx.c          | 17
 net/mac80211/util.c        |  1
 net/mac80211/wme.c         |  6
 net/rfkill/rfkill-input.c  | 54
 net/rfkill/rfkill.c        | 15
 net/sched/sch_atm.c        | 14
 net/sched/sch_cbq.c        | 27
 net/sched/sch_dsmark.c     | 10
 net/sched/sch_generic.c    | 12
 net/sched/sch_hfsc.c       | 12
 net/sched/sch_htb.c        | 24
 net/sched/sch_netem.c      |  5
 net/sched/sch_prio.c       | 14
 net/sched/sch_red.c        |  2
 net/sched/sch_sfq.c        |  8
 net/sched/sch_tbf.c        |  3
 net/sctp/ipv6.c            |  8
 net/sctp/output.c          |  6
 net/sctp/protocol.c        |  9
 32 files changed, 273 insertions(+), 143 deletions(-)
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index f597987b2424..f288fc4aef9b 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -36,6 +36,7 @@ static struct ctl_path ax25_path[] = {
 	{ .procname = "ax25", .ctl_name = NET_AX25, },
 	{ }
 };
+
 static const ctl_table ax25_param_table[] = {
 	{
 		.ctl_name = NET_AX25_IP_DEFAULT_MODE,
@@ -167,6 +168,7 @@ static const ctl_table ax25_param_table[] = {
 		.extra1 = &min_proto,
 		.extra2 = &max_proto
 	},
+#ifdef CONFIG_AX25_DAMA_SLAVE
 	{
 		.ctl_name = NET_AX25_DAMA_SLAVE_TIMEOUT,
 		.procname = "dama_slave_timeout",
@@ -177,6 +179,8 @@ static const ctl_table ax25_param_table[] = {
 		.extra1 = &min_ds_timeout,
 		.extra2 = &max_ds_timeout
 	},
+#endif
+
 	{ .ctl_name = 0 }	/* that's all, folks! */
 };
 
@@ -210,16 +214,6 @@ void ax25_register_sysctl(void)
 		ax25_table[n].procname = ax25_dev->dev->name;
 		ax25_table[n].mode = 0555;
 
-#ifndef CONFIG_AX25_DAMA_SLAVE
-		/*
-		 * We do not wish to have a representation of this parameter
-		 * in /proc/sys/ when configured *not* to include the
-		 * AX.25 DAMA slave code, do we?
-		 */
-
-		child[AX25_VALUES_DS_TIMEOUT].procname = NULL;
-#endif
-
 		child[AX25_MAX_VALUES].ctl_name = 0;	/* just in case... */
 
 		for (k = 0; k < AX25_MAX_VALUES; k++)
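
The net effect of the ax25 hunks: the dama_slave_timeout entry is now excluded at compile time instead of being hidden at runtime by blanking its procname. A minimal standalone C sketch of the #ifdef-guarded-table-entry idiom follows; the names are illustrative, not the kernel's.

#include <stdio.h>

#define FEATURE_DAMA	/* comment this out to compile the entry away */

struct param { const char *name; };

static const struct param params[] = {
	{ .name = "ip_default_mode" },
#ifdef FEATURE_DAMA
	{ .name = "dama_slave_timeout" },	/* only built when enabled */
#endif
	{ .name = NULL }	/* terminator, like { .ctl_name = 0 } */
};

int main(void)
{
	/* walk the table exactly as registration code would */
	for (const struct param *p = params; p->name; p++)
		printf("%s\n", p->name);
	return 0;
}
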
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 6e280a8a31ee..6a9a6cd74b1e 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -113,7 +113,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	struct rtable *rt = &br->fake_rtable;
 
 	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.dev = &br->dev;
+	rt->u.dst.dev = br->dev;
 	rt->u.dst.path = &rt->u.dst;
 	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
 	rt->u.dst.flags = DST_NOXFRM;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 921bbe5cb94a..6e63ec3f1fcf 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -368,14 +368,25 @@ static void br_make_blocking(struct net_bridge_port *p)
 /* called under bridge lock */
 static void br_make_forwarding(struct net_bridge_port *p)
 {
-	if (p->state == BR_STATE_BLOCKING) {
-		if (p->br->stp_enabled == BR_KERNEL_STP)
-			p->state = BR_STATE_LISTENING;
-		else
-			p->state = BR_STATE_LEARNING;
+	struct net_bridge *br = p->br;
 
-		br_log_state(p);
-		mod_timer(&p->forward_delay_timer, jiffies + p->br->forward_delay);	}
+	if (p->state != BR_STATE_BLOCKING)
+		return;
+
+	if (br->forward_delay == 0) {
+		p->state = BR_STATE_FORWARDING;
+		br_topology_change_detection(br);
+		del_timer(&p->forward_delay_timer);
+	}
+	else if (p->br->stp_enabled == BR_KERNEL_STP)
+		p->state = BR_STATE_LISTENING;
+	else
+		p->state = BR_STATE_LEARNING;
+
+	br_log_state(p);
+
+	if (br->forward_delay != 0)
+		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
 }
 
 /* called under bridge lock */
diff --git a/net/core/dev.c b/net/core/dev.c
index 69320a56a084..01993ad74e76 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1796,7 +1796,7 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
@@ -1805,7 +1805,6 @@ gso:
 
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1909,7 +1908,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1995,7 +1993,7 @@ static void net_tx_action(struct softirq_action *h)
 			smp_mb__before_clear_bit();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
 				qdisc_run(q);
 				spin_unlock(root_lock);
@@ -2270,6 +2268,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2291,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2299,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3988,6 +3994,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
@@ -4165,6 +4175,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
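
The dev.c hunks above let unregistration flush each CPU's input backlog via flush_backlog(), which in turn removes the per-packet dev_hold()/dev_put() refcounting from the receive hot path. skb_queue_walk_safe() exists precisely so the current entry can be unlinked without breaking the iteration: the next pointer is saved before the body runs. A minimal standalone C sketch of that walk-safe deletion pattern, with illustrative types and names:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int dev_id; struct pkt *next; };

/* like skb_queue_walk_safe(): remember the next node before the body
 * runs, so unlinking/freeing the current node cannot break the walk */
#define pkt_walk_safe(head, p, tmp) \
	for (p = (head); p && ((tmp) = p->next, 1); p = (tmp))

static struct pkt *flush_for_dev(struct pkt *head, int dev_id)
{
	struct pkt *p, *tmp, **link = &head;

	pkt_walk_safe(head, p, tmp) {
		if (p->dev_id == dev_id) {
			*link = p->next;	/* unlink, like __skb_unlink() */
			free(p);		/* like kfree_skb() */
		} else {
			link = &p->next;
		}
	}
	return head;
}

int main(void)
{
	struct pkt *head = NULL, **pp = &head;
	int ids[] = { 1, 2, 1 };

	for (int i = 0; i < 3; i++) {
		*pp = calloc(1, sizeof(**pp));
		(*pp)->dev_id = ids[i];
		pp = &(*pp)->next;
	}

	head = flush_for_dev(head, 1);
	for (struct pkt *p = head; p; p = p->next)
		printf("kept dev %d\n", p->dev_id);	/* prints "kept dev 2" */
	return 0;
}
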
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f62c8af85d38..9d92e41826e7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct neighbour *n = neigh_get_first(seq);
 
 	if (n) {
+		--(*pos);
 		while (*pos) {
 			n = neigh_get_next(seq, n, pos);
 			if (!n)
@@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct pneigh_entry *pn = pneigh_get_first(seq);
 
 	if (pn) {
+		--(*pos);
 		while (*pos) {
 			pn = pneigh_get_next(seq, pn, pos);
 			if (!pn)
@@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 {
 	struct neigh_seq_state *state = seq->private;
 	void *rc;
+	loff_t idxpos = *pos;
 
-	rc = neigh_get_idx(seq, pos);
+	rc = neigh_get_idx(seq, &idxpos);
 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
-		rc = pneigh_get_idx(seq, pos);
+		rc = pneigh_get_idx(seq, &idxpos);
 
 	return rc;
 }
@@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 	__acquires(tbl->lock)
 {
 	struct neigh_seq_state *state = seq->private;
-	loff_t pos_minus_one;
 
 	state->tbl = tbl;
 	state->bucket = 0;
@@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	read_lock_bh(&tbl->lock);
 
-	pos_minus_one = *pos - 1;
-	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
+	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
 
@@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	void *rc;
 
 	if (v == SEQ_START_TOKEN) {
-		rc = neigh_get_idx(seq, pos);
+		rc = neigh_get_first(seq);
 		goto out;
 	}
 
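
The neighbour.c fix restores the seq_file position contract: *pos == 0 is the SEQ_START_TOKEN header, so position n (n >= 1) must map to element n - 1 — hence the one-time --(*pos) — and the first ->next call after the token should simply fetch the first element. A small standalone C model of that convention, with an array standing in for the neighbour hash walk and illustrative names:

#include <stdio.h>

static const char *items[] = { "entry0", "entry1", "entry2" };
#define NITEMS (sizeof(items) / sizeof(items[0]))
#define START_TOKEN ((const char *)-1)

static const char *seq_start(long long *pos)	/* pos 0 is the header */
{
	if (*pos == 0)
		return START_TOKEN;
	return *pos <= NITEMS ? items[*pos - 1] : NULL;
}

static const char *seq_next(const char *v, long long *pos)
{
	++*pos;
	if (v == START_TOKEN)	/* first real element, like neigh_get_first() */
		return items[0];
	return *pos <= NITEMS ? items[*pos - 1] : NULL;
}

int main(void)
{
	long long pos = 0;

	for (const char *v = seq_start(&pos); v; v = seq_next(v, &pos))
		printf("%s\n", v == START_TOKEN ? "[header]" : v);
	return 0;
}
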
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3284605f2ec7..2498cdaf8cbe 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2085,15 +2085,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
 			/* reset time */
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
 			pkt_dev->curfl += 1;
 			if (pkt_dev->curfl >= pkt_dev->cflows)
 				pkt_dev->curfl = 0; /*reset */
 		}
 	} else {
 		flow = random32() % pkt_dev->cflows;
+		pkt_dev->curfl = flow;
 
-		if (pkt_dev->flows[flow].count > pkt_dev->lflow)
+		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
+		}
 	}
 
 	return pkt_dev->curfl;
@@ -2162,7 +2166,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 			mc = random32() % pkt_dev->src_mac_count;
 		else {
 			mc = pkt_dev->cur_src_mac_offset++;
-			if (pkt_dev->cur_src_mac_offset >
+			if (pkt_dev->cur_src_mac_offset >=
 			    pkt_dev->src_mac_count)
 				pkt_dev->cur_src_mac_offset = 0;
 		}
@@ -2189,7 +2193,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 		else {
 			mc = pkt_dev->cur_dst_mac_offset++;
-			if (pkt_dev->cur_dst_mac_offset >
+			if (pkt_dev->cur_dst_mac_offset >=
 			    pkt_dev->dst_mac_count) {
 				pkt_dev->cur_dst_mac_offset = 0;
 			}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 770d827f5ab8..e0689fd7b798 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -232,6 +232,7 @@ static struct ctl_table ipv4_table[] = {
 		.mode = 0644,
 		.proc_handler = &ipv4_doint_and_flush,
 		.strategy = &ipv4_doint_and_flush_strategy,
+		.extra2 = &init_net,
 	},
 	{
 		.ctl_name = NET_IPV4_NO_PMTU_DISC,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a027003d69a4..a4402de425d9 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -269,7 +269,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	skb->mark = sk->sk_mark;
 
 	mtu = dst_mtu(dst);
-	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
+	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 		IP6_INC_STATS(ip6_dst_idev(skb->dst),
 			      IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ea33b26512c2..741cfcd96f88 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -346,6 +346,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		 */
 		if (optlen == 0)
 			optval = NULL;
+		else if (optval == NULL)
+			goto e_inval;
 		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
 			 optlen & 0x7 || optlen > 8 * 255)
 			goto e_inval;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index a46badd1082d..ec394cf5a19b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -199,10 +199,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq6 = inet6_rsk(req);
 	treq = tcp_rsk(req);
 
-	if (security_inet_conn_request(sk, skb, req)) {
-		reqsk_free(req);
-		goto out;
-	}
+	if (security_inet_conn_request(sk, skb, req))
+		goto out_free;
 
 	req->mss = mss;
 	ireq->rmt_port = th->source;
@@ -255,14 +253,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
 		fl.fl_ip_sport = inet_sk(sk)->sport;
 		security_req_classify_flow(req, &fl);
-		if (ip6_dst_lookup(sk, &dst, &fl)) {
-			reqsk_free(req);
-			goto out;
-		}
+		if (ip6_dst_lookup(sk, &dst, &fl))
+			goto out_free;
+
 		if (final_p)
 			ipv6_addr_copy(&fl.fl6_dst, final_p);
 		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
-			goto out;
+			goto out_free;
 	}
 
 	req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
@@ -273,7 +270,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->rcv_wscale = rcv_wscale;
 
 	ret = get_cookie_sock(sk, skb, req, dst);
-
-out:	return ret;
+out:
+	return ret;
+out_free:
+	reqsk_free(req);
+	return NULL;
 }
 
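
The syncookies rewrite above is the classic single-exit cleanup idiom: every failure after req is allocated jumps to one out_free label, so no future error branch can forget reqsk_free(). A standalone C sketch of the shape, with illustrative names:

#include <stdlib.h>

struct request { int mss; };

static struct request *build_request(int a, int b)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	if (a < 0)
		goto out_free;	/* like security_inet_conn_request() failing */
	if (b < 0)
		goto out_free;	/* like ip6_dst_lookup() failing */
	req->mss = a + b;
	return req;

out_free:
	free(req);		/* the one place that releases req */
	return NULL;
}

int main(void)
{
	struct request *r = build_request(1, 2);

	free(r);
	return 0;
}
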
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a4f9a832722a..ec59345af65b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -82,6 +82,7 @@ struct ieee80211_sta_bss {
 
 	u8 bssid[ETH_ALEN];
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 dtim_period;
 	u16 capability; /* host byte order */
 	enum ieee80211_band band;
 	int freq;
@@ -586,6 +587,7 @@ struct ieee80211_local {
 	struct timer_list sta_cleanup;
 
 	unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
+	unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
 	struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
 	struct tasklet_struct tx_pending_tasklet;
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a4c5b90de769..0c02c471bca2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1689,6 +1689,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (local->hw.conf.beacon_int < 10)
 		local->hw.conf.beacon_int = 100;
 
+	if (local->hw.max_listen_interval == 0)
+		local->hw.max_listen_interval = 1;
+
+	local->hw.conf.listen_interval = local->hw.max_listen_interval;
+
 	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
 						  IEEE80211_HW_SIGNAL_DB |
 						  IEEE80211_HW_SIGNAL_DBM) ?
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index acb04133a95d..e1d11c9b6729 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -551,6 +551,7 @@ static void ieee80211_set_associated(struct net_device *dev,
 	/* set timing information */
 	sdata->bss_conf.beacon_int = bss->beacon_int;
 	sdata->bss_conf.timestamp = bss->timestamp;
+	sdata->bss_conf.dtim_period = bss->dtim_period;
 
 	changed |= ieee80211_handle_bss_capability(sdata, bss);
 
@@ -773,7 +774,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
 		mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
 						   IEEE80211_STYPE_REASSOC_REQ);
 		mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
-		mgmt->u.reassoc_req.listen_interval = cpu_to_le16(1);
+		mgmt->u.reassoc_req.listen_interval =
+				cpu_to_le16(local->hw.conf.listen_interval);
 		memcpy(mgmt->u.reassoc_req.current_ap, ifsta->prev_bssid,
 		       ETH_ALEN);
 	} else {
@@ -781,7 +783,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
 		mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
 						   IEEE80211_STYPE_ASSOC_REQ);
 		mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
-		mgmt->u.assoc_req.listen_interval = cpu_to_le16(1);
+		mgmt->u.reassoc_req.listen_interval =
+				cpu_to_le16(local->hw.conf.listen_interval);
 	}
 
 	/* SSID */
@@ -2688,6 +2691,16 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
 	bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
 	bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
 
+	if (elems->tim) {
+		struct ieee80211_tim_ie *tim_ie =
+			(struct ieee80211_tim_ie *)elems->tim;
+		bss->dtim_period = tim_ie->dtim_period;
+	}
+
+	/* set default value for buggy APs */
+	if (!elems->tim || bss->dtim_period == 0)
+		bss->dtim_period = 1;
+
 	bss->supp_rates_len = 0;
 	if (elems->supp_rates) {
 		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
@@ -3650,11 +3663,21 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
 	       "%s\n", print_mac(mac, bssid),
 	       print_mac(mac2, ifsta->bssid));
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
-	if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 &&
-	    (bss = ieee80211_rx_bss_get(dev, bssid,
-					local->hw.conf.channel->center_freq,
-					ifsta->ssid, ifsta->ssid_len))) {
+
+	if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
 		int ret;
+		int search_freq;
+
+		if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
+			search_freq = bss->freq;
+		else
+			search_freq = local->hw.conf.channel->center_freq;
+
+		bss = ieee80211_rx_bss_get(dev, bssid, search_freq,
+					   ifsta->ssid, ifsta->ssid_len);
+		if (!bss)
+			goto dont_join;
+
 		printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
 		       " based on configured SSID\n",
 		       dev->name, print_mac(mac, bssid));
@@ -3662,6 +3685,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
 		ieee80211_rx_bss_put(local, bss);
 		return ret;
 	}
+
+dont_join:
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG " did not try to join ibss\n");
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
@@ -3895,7 +3920,7 @@ done:
 	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
 		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 		if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
-		    (!ifsta->state == IEEE80211_IBSS_JOINED &&
+		    (!(ifsta->state == IEEE80211_IBSS_JOINED) &&
 		     !ieee80211_sta_active_ibss(dev)))
 			ieee80211_sta_find_ibss(dev, ifsta);
 	}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 69019e943873..771ec68b848d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1060,13 +1060,14 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
 			  struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *info;
 	int ret, i;
 
-	if (netif_subqueue_stopped(local->mdev, skb))
-		return IEEE80211_TX_AGAIN;
-
 	if (skb) {
+		if (netif_subqueue_stopped(local->mdev, skb))
+			return IEEE80211_TX_AGAIN;
+		info = IEEE80211_SKB_CB(skb);
+
 		ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
 				     "TX to low-level driver", skb);
 		ret = local->ops->tx(local_to_hw(local), skb);
@@ -1215,6 +1216,7 @@ retry:
 
 		if (ret == IEEE80211_TX_FRAG_AGAIN)
 			skb = NULL;
+
 		set_bit(queue, local->queues_pending);
 		smp_mb();
 		/*
@@ -1708,14 +1710,19 @@ void ieee80211_tx_pending(unsigned long data)
 	netif_tx_lock_bh(dev);
 	for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
 		/* Check that this queue is ok */
-		if (__netif_subqueue_stopped(local->mdev, i))
+		if (__netif_subqueue_stopped(local->mdev, i) &&
+		    !test_bit(i, local->queues_pending_run))
 			continue;
 
 		if (!test_bit(i, local->queues_pending)) {
+			clear_bit(i, local->queues_pending_run);
 			ieee80211_wake_queue(&local->hw, i);
 			continue;
 		}
 
+		clear_bit(i, local->queues_pending_run);
+		netif_start_subqueue(local->mdev, i);
+
 		store = &local->pending_packet[i];
 		tx.extra_frag = store->extra_frag;
 		tx.num_extra_frag = store->num_extra_frag;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 19f85e1b3695..0d463c80c404 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -361,6 +361,7 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
 	struct ieee80211_local *local = hw_to_local(hw);
 
 	if (test_bit(queue, local->queues_pending)) {
+		set_bit(queue, local->queues_pending_run);
 		tasklet_schedule(&local->tx_pending_tasklet);
 	} else {
 		netif_wake_subqueue(local->mdev, queue);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 28437f0001db..4310e2f65661 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -241,12 +241,14 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 	} else {
 		struct netdev_queue *txq;
 		spinlock_t *root_lock;
+		struct Qdisc *q;
 
 		txq = netdev_get_tx_queue(local->mdev, agg_queue);
-		root_lock = qdisc_root_lock(txq->qdisc);
+		q = rcu_dereference(txq->qdisc);
+		root_lock = qdisc_lock(q);
 
 		spin_lock_bh(root_lock);
-		qdisc_reset(txq->qdisc);
+		qdisc_reset(q);
 		spin_unlock_bh(root_lock);
 	}
 }
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index 8aa822730145..e5b69556bb5b 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -109,6 +109,25 @@ static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
 static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
 static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
 
+static void rfkill_schedule_evsw_rfkillall(int state)
+{
+	/* EVERY radio type. state != 0 means radios ON */
+	/* handle EPO (emergency power off) through shortcut */
+	if (state) {
+		rfkill_schedule_set(&rfkill_wwan,
+				    RFKILL_STATE_UNBLOCKED);
+		rfkill_schedule_set(&rfkill_wimax,
+				    RFKILL_STATE_UNBLOCKED);
+		rfkill_schedule_set(&rfkill_uwb,
+				    RFKILL_STATE_UNBLOCKED);
+		rfkill_schedule_set(&rfkill_bt,
+				    RFKILL_STATE_UNBLOCKED);
+		rfkill_schedule_set(&rfkill_wlan,
+				    RFKILL_STATE_UNBLOCKED);
+	} else
+		rfkill_schedule_epo();
+}
+
 static void rfkill_event(struct input_handle *handle, unsigned int type,
 			 unsigned int code, int data)
 {
@@ -132,21 +151,7 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
 	} else if (type == EV_SW) {
 		switch (code) {
 		case SW_RFKILL_ALL:
-			/* EVERY radio type. data != 0 means radios ON */
-			/* handle EPO (emergency power off) through shortcut */
-			if (data) {
-				rfkill_schedule_set(&rfkill_wwan,
-						    RFKILL_STATE_UNBLOCKED);
-				rfkill_schedule_set(&rfkill_wimax,
-						    RFKILL_STATE_UNBLOCKED);
-				rfkill_schedule_set(&rfkill_uwb,
-						    RFKILL_STATE_UNBLOCKED);
-				rfkill_schedule_set(&rfkill_bt,
-						    RFKILL_STATE_UNBLOCKED);
-				rfkill_schedule_set(&rfkill_wlan,
-						    RFKILL_STATE_UNBLOCKED);
-			} else
-				rfkill_schedule_epo();
+			rfkill_schedule_evsw_rfkillall(data);
 			break;
 		default:
 			break;
@@ -168,6 +173,7 @@ static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
 	handle->handler = handler;
 	handle->name = "rfkill";
 
+	/* causes rfkill_start() to be called */
 	error = input_register_handle(handle);
 	if (error)
 		goto err_free_handle;
@@ -185,6 +191,23 @@ static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
 	return error;
 }
 
+static void rfkill_start(struct input_handle *handle)
+{
+	/* Take event_lock to guard against configuration changes, we
+	 * should be able to deal with concurrency with rfkill_event()
+	 * just fine (which event_lock will also avoid). */
+	spin_lock_irq(&handle->dev->event_lock);
+
+	if (test_bit(EV_SW, handle->dev->evbit)) {
+		if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
+			rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
+							handle->dev->sw));
+		/* add resync for further EV_SW events here */
+	}
+
+	spin_unlock_irq(&handle->dev->event_lock);
+}
+
 static void rfkill_disconnect(struct input_handle *handle)
 {
 	input_close_device(handle);
@@ -225,6 +248,7 @@ static struct input_handler rfkill_handler = {
 	.event = rfkill_event,
 	.connect = rfkill_connect,
 	.disconnect = rfkill_disconnect,
+	.start = rfkill_start,
 	.name = "rfkill",
 	.id_table = rfkill_ids,
 };
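
Factoring the SW_RFKILL_ALL policy into rfkill_schedule_evsw_rfkillall() lets two callers share it: the EV_SW event path and the new .start handler, which resyncs whatever switch state existed before the handle was registered. A standalone C sketch of that shape, with illustrative names and printf standing in for the scheduled work:

#include <stdio.h>

static void set_all_radios(int unblocked)
{
	printf("all radio types -> %s\n", unblocked ? "UNBLOCKED" : "EPO");
}

static void evsw_rfkillall(int state)	/* the shared policy helper */
{
	if (state)
		set_all_radios(1);	/* radios ON */
	else
		set_all_radios(0);	/* emergency power off */
}

static void on_event(int sw_state)	/* EV_SW event path */
{
	evsw_rfkillall(sw_state);
}

static void on_start(int sw_state)	/* resync at registration time */
{
	evsw_rfkillall(sw_state);
}

int main(void)
{
	on_start(1);	/* pick up the switch position that predates us */
	on_event(0);	/* later EPO event */
	return 0;
}
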
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index c6f2f388cb72..d2d45655cd1a 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -105,6 +105,16 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
 #endif /* CONFIG_RFKILL_LEDS */
 }
 
+#ifdef CONFIG_RFKILL_LEDS
+static void rfkill_led_trigger_activate(struct led_classdev *led)
+{
+	struct rfkill *rfkill = container_of(led->trigger,
+			struct rfkill, led_trigger);
+
+	rfkill_led_trigger(rfkill, rfkill->state);
+}
+#endif /* CONFIG_RFKILL_LEDS */
+
 static void notify_rfkill_state_change(struct rfkill *rfkill)
 {
 	blocking_notifier_call_chain(&rfkill_notifier_list,
@@ -589,7 +599,10 @@ static void rfkill_led_trigger_register(struct rfkill *rfkill)
 #ifdef CONFIG_RFKILL_LEDS
 	int error;
 
-	rfkill->led_trigger.name = rfkill->dev.bus_id;
+	if (!rfkill->led_trigger.name)
+		rfkill->led_trigger.name = rfkill->dev.bus_id;
+	if (!rfkill->led_trigger.activate)
+		rfkill->led_trigger.activate = rfkill_led_trigger_activate;
 	error = led_trigger_register(&rfkill->led_trigger);
 	if (error)
 		rfkill->led_trigger.name = NULL;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5b..43d37256c15e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a683..4e261ce62f48 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			switch (result) {
 			case TC_ACT_QUEUED:
 			case TC_ACT_STOLEN:
-				*qerr = NET_XMIT_SUCCESS;
+				*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 			case TC_ACT_SHOT:
 				return NULL;
 			case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e2..edd1298f85f6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9c9cd4d94890..7cf83b37459d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
@@ -507,7 +507,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -543,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -659,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
@@ -735,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+		spinlock_t *root_lock = qdisc_lock(qdisc);
 
 		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba8..c2b8d9cce3d2 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f2..be35422711a3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
#ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
#ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a59085700678..fb0294d0b55e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb095..eac197610edf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bbd..5da05839e225 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da666568..6e041d10dbdb 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f7632..7d3b7ff3bf07 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 
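
All of the sch_*.c hunks above move to one return-code convention: a child qdisc that consumed a packet without queuing it returns NET_XMIT_SUCCESS in the low bits plus a __NET_XMIT_STOLEN or __NET_XMIT_BYPASS hint for the parent, and parents bump qstats.drops only when net_xmit_drop_count() says the packet was really lost. A standalone C model of the scheme — the flag values here are illustrative, not the kernel's:

#include <stdio.h>

#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01
#define __NET_XMIT_STOLEN	0x04	/* a filter action consumed the skb */
#define __NET_XMIT_BYPASS	0x08	/* delivered outside the normal queue */

/* stolen packets are not counted as drops */
#define net_xmit_drop_count(e)	(((e) & __NET_XMIT_STOLEN) ? 0 : 1)

static void account(int ret, int *drops)
{
	if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
		(*drops)++;	/* only real drops bump qstats.drops */
}

int main(void)
{
	int drops = 0;

	account(NET_XMIT_SUCCESS | __NET_XMIT_STOLEN, &drops);	/* not a drop */
	account(NET_XMIT_DROP, &drops);				/* a drop */
	printf("drops = %d\n", drops);	/* prints 1 */
	return 0;
}
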
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a238d6834b33..483a01d0740a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -195,8 +195,7 @@ out:
 }
 
 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
-			int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -231,7 +230,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
 
 	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
 
-	return ip6_xmit(sk, skb, &fl, np->opt, ipfragok);
+	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
+		skb->local_df = 1;
+
+	return ip6_xmit(sk, skb, &fl, np->opt, 0);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 45684646b1db..0dc4a7dfb234 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -586,10 +586,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
 			  nskb->len);
 
-	if (tp->param_flags & SPP_PMTUD_ENABLE)
-		(*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);
-	else
-		(*tp->af_specific->sctp_xmit)(nskb, tp, 1);
+	nskb->local_df = packet->ipfragok;
+	(*tp->af_specific->sctp_xmit)(nskb, tp);
 
 out:
 	packet->size = packet->overhead;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a6e0818bcff5..0b65354aaf64 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -862,16 +862,21 @@ static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
 
 /* Wrapper routine that calls the ip transmit routine. */
 static inline int sctp_v4_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport, int ipfragok)
+			       struct sctp_transport *transport)
 {
+	struct inet_sock *inet = inet_sk(skb->sk);
+
 	SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
 			  "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n",
 			  __func__, skb, skb->len,
 			  NIPQUAD(skb->rtable->rt_src),
 			  NIPQUAD(skb->rtable->rt_dst));
 
+	inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
+			 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
+
 	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
-	return ip_queue_xmit(skb, ipfragok);
+	return ip_queue_xmit(skb, 0);
 }
 
 static struct sctp_af sctp_af_inet;
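
The three SCTP hunks above drop the ipfragok argument that was threaded through every af-specific transmit hook; the per-transport SPP_PMTUD_ENABLE setting is now translated once into state the IP layer already honors (skb->local_df for IPv6, inet->pmtudisc for IPv4). A standalone C sketch of that translation point, with illustrative types and names:

#include <stdio.h>

struct transport { int pmtud_enable; };
struct packet { int local_df; };	/* "may fragment locally" */

static void xmit(struct packet *pkt, const struct transport *t)
{
	/* one translation point, like sctp_v6_xmit() setting local_df */
	if (!t->pmtud_enable)
		pkt->local_df = 1;

	printf("send: %s\n", pkt->local_df ?
	       "fragment if needed (DF off)" : "respect PMTU (DF on)");
}

int main(void)
{
	struct transport t = { .pmtud_enable = 0 };
	struct packet pkt = { 0 };

	xmit(&pkt, &t);	/* prints "fragment if needed (DF off)" */
	return 0;
}
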