Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c  |  2
-rw-r--r--  net/core/dev.c             | 24
-rw-r--r--  net/ipv6/ip6_output.c      |  2
-rw-r--r--  net/ipv6/ipv6_sockglue.c   |  2
-rw-r--r--  net/ipv6/syncookies.c      | 22
-rw-r--r--  net/sched/sch_atm.c        | 14
-rw-r--r--  net/sched/sch_cbq.c        | 27
-rw-r--r--  net/sched/sch_dsmark.c     | 10
-rw-r--r--  net/sched/sch_hfsc.c       | 12
-rw-r--r--  net/sched/sch_htb.c        | 24
-rw-r--r--  net/sched/sch_netem.c      |  5
-rw-r--r--  net/sched/sch_prio.c       | 14
-rw-r--r--  net/sched/sch_red.c        |  2
-rw-r--r--  net/sched/sch_sfq.c        |  8
-rw-r--r--  net/sched/sch_tbf.c        |  3
-rw-r--r--  net/sctp/ipv6.c            |  8
-rw-r--r--  net/sctp/output.c          |  6
-rw-r--r--  net/sctp/protocol.c        |  9
18 files changed, 115 insertions, 79 deletions
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 6e280a8a31e..6a9a6cd74b1 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -113,7 +113,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	struct rtable *rt = &br->fake_rtable;
 
 	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.dev = &br->dev;
+	rt->u.dst.dev = br->dev;
 	rt->u.dst.path = &rt->u.dst;
 	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
 	rt->u.dst.flags = DST_NOXFRM;
diff --git a/net/core/dev.c b/net/core/dev.c
index cbf80098980..01993ad74e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1805,7 +1805,6 @@ gso:
 
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1909,7 +1908,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -2270,6 +2268,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2291,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2299,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -4169,6 +4175,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a027003d69a..a4402de425d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -269,7 +269,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	skb->mark = sk->sk_mark;
 
 	mtu = dst_mtu(dst);
-	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
+	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 		IP6_INC_STATS(ip6_dst_idev(skb->dst),
 			      IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ea33b26512c..741cfcd96f8 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -346,6 +346,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		 */
 		if (optlen == 0)
 			optval = NULL;
+		else if (optval == NULL)
+			goto e_inval;
 		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
 			 optlen & 0x7 || optlen > 8 * 255)
 			goto e_inval;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index a46badd1082..ec394cf5a19 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -199,10 +199,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq6 = inet6_rsk(req);
 	treq = tcp_rsk(req);
 
-	if (security_inet_conn_request(sk, skb, req)) {
-		reqsk_free(req);
-		goto out;
-	}
+	if (security_inet_conn_request(sk, skb, req))
+		goto out_free;
 
 	req->mss = mss;
 	ireq->rmt_port = th->source;
@@ -255,14 +253,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
 		fl.fl_ip_sport = inet_sk(sk)->sport;
 		security_req_classify_flow(req, &fl);
-		if (ip6_dst_lookup(sk, &dst, &fl)) {
-			reqsk_free(req);
-			goto out;
-		}
+		if (ip6_dst_lookup(sk, &dst, &fl))
+			goto out_free;
+
 		if (final_p)
 			ipv6_addr_copy(&fl.fl6_dst, final_p);
 		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
-			goto out;
+			goto out_free;
 	}
 
 	req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
@@ -273,7 +270,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->rcv_wscale = rcv_wscale;
 
 	ret = get_cookie_sock(sk, skb, req, dst);
-
-out:	return ret;
+out:
+	return ret;
+out_free:
+	reqsk_free(req);
+	return NULL;
 }
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5..43d37256c15 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a68..4e261ce62f4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e..edd1298f85f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba..c2b8d9cce3d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f..be35422711a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
-		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
+		   NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a5908570067..fb0294d0b55 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb09..eac197610ed 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bb..5da05839e22 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da66656..6e041d10dbd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f763..7d3b7ff3bf0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a238d6834b3..483a01d0740 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -195,8 +195,7 @@ out:
 }
 
 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
-			int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -231,7 +230,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
 
 	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
 
-	return ip6_xmit(sk, skb, &fl, np->opt, ipfragok);
+	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
+		skb->local_df = 1;
+
+	return ip6_xmit(sk, skb, &fl, np->opt, 0);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 45684646b1d..0dc4a7dfb23 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -586,10 +586,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
 			  nskb->len);
 
-	if (tp->param_flags & SPP_PMTUD_ENABLE)
-		(*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);
-	else
-		(*tp->af_specific->sctp_xmit)(nskb, tp, 1);
+	nskb->local_df = packet->ipfragok;
+	(*tp->af_specific->sctp_xmit)(nskb, tp);
 
 out:
 	packet->size = packet->overhead;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a6e0818bcff..0b65354aaf6 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -862,16 +862,21 @@ static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
 
 /* Wrapper routine that calls the ip transmit routine. */
 static inline int sctp_v4_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport, int ipfragok)
+			       struct sctp_transport *transport)
 {
+	struct inet_sock *inet = inet_sk(skb->sk);
+
 	SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
 			  "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n",
 			  __func__, skb, skb->len,
 			  NIPQUAD(skb->rtable->rt_src),
 			  NIPQUAD(skb->rtable->rt_dst));
 
+	inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
+			 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
+
 	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
-	return ip_queue_xmit(skb, ipfragok);
+	return ip_queue_xmit(skb, 0);
 }
 
 static struct sctp_af sctp_af_inet;