| author | David S. Miller <davem@davemloft.net> | 2008-08-18 02:55:36 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-08-18 03:39:41 -0400 |
| commit | 69747650c814a8a79fef412c7416adf823293a3e | |
| tree | c1c83e9ae903b0e93029f0071fdca544d2add340 | |
| parent | 96d203169d1d851ac1468f7d4459a09581be364c | |
pkt_sched: Fix return value corruption in HTB and TBF.
Based upon a bug report by Josip Rodin.
Packet schedulers should return NET_XMIT_DROP only if
the packet really was dropped. If the packet does reach
the device after we return NET_XMIT_DROP, then TCP can
crash, because it depends upon the enqueue path return
values being accurate.
Signed-off-by: David S. Miller <davem@davemloft.net>
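The failure mode is easy to see in miniature. Below is a standalone C sketch, not kernel code; the NET_XMIT_* values mirror include/linux/netdevice.h of this era but are assumptions here. A child qdisc returns NET_XMIT_CN while keeping the packet queued, and a parent that rewrites every nonzero status into NET_XMIT_DROP tells its caller the packet died when it did not.

```c
#include <stdio.h>

/* Assumed values, mirroring include/linux/netdevice.h of this era. */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* packet dropped */
#define NET_XMIT_CN		2	/* congestion notification */

/* Child qdisc: signals congestion but keeps the packet queued. */
static int child_enqueue(int *pkt_queued)
{
	*pkt_queued = 1;
	return NET_XMIT_CN;
}

/* Before the fix: every nonzero child status was rewritten to DROP,
 * so the caller was told the packet died even though it was queued. */
static int parent_enqueue_buggy(int *pkt_queued)
{
	if (child_enqueue(pkt_queued) != NET_XMIT_SUCCESS)
		return NET_XMIT_DROP;	/* wrong: packet is still queued */
	return NET_XMIT_SUCCESS;
}

/* After the fix: propagate whatever status the child reported. */
static int parent_enqueue_fixed(int *pkt_queued)
{
	return child_enqueue(pkt_queued);
}

int main(void)
{
	int queued = 0;
	int ret;

	ret = parent_enqueue_buggy(&queued);
	printf("buggy: ret=%d queued=%d\n", ret, queued);

	queued = 0;
	ret = parent_enqueue_fixed(&queued);
	printf("fixed: ret=%d queued=%d\n", ret, queued);
	return 0;
}
```

The two HTB hunks below are exactly the second variant: hand the child's status back unchanged instead of hard-coding NET_XMIT_DROP.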
 net/sched/sch_htb.c |  4 ++--
 net/sched/sch_tbf.c | 11 ++---------
 2 files changed, 4 insertions(+), 11 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6febd245e62b..0df0df202ed0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			sch->qstats.drops++;
 			cl->qstats.drops++;
 		}
-		return NET_XMIT_DROP;
+		return ret;
 	} else {
 		cl->bstats.packets +=
 			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
@@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			sch->qstats.drops++;
 			cl->qstats.drops++;
 		}
-		return NET_XMIT_DROP;
+		return ret;
 	} else
 		htb_activate(q, cl);
 
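For context on why `return ret` is the right fix: the child qdisc has already done its own drop accounting and chosen a status, and TCP interprets that status to decide whether the skb is still in flight. A hedged sketch of the evaluation helper from include/net/sock.h of this era follows; its exact form is an assumption.

```c
/* Assumed form of the helper TCP uses to interpret an enqueue status:
 * a congestion notification (NET_XMIT_CN) still counts as "accepted",
 * so a qdisc that rewrites CN into DROP makes TCP release state for
 * an skb that is in fact still queued beneath it. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
```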
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7d3b7ff3bf07..94c61598b86a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (qdisc_pkt_len(skb) > q->max_size) {
-		sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
-		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
-#endif
-			kfree_skb(skb);
-
-		return NET_XMIT_DROP;
-	}
+	if (qdisc_pkt_len(skb) > q->max_size)
+		return qdisc_reshape_fail(skb, sch);
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
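The nine lines removed from tbf_enqueue() look like an open-coded copy of the qdisc_reshape_fail() helper, but they differ in the one way that matters here: with CONFIG_NET_CLS_ACT, a successful sch->reshape_fail() handler rehandles the packet (it may still reach the device), yet the open-coded copy returned NET_XMIT_DROP regardless. A sketch of the helper, reconstructed from the removed lines plus the success path the commit message implies; the exact body in include/net/sch_generic.h of this era is an assumption.

```c
/* Sketch of qdisc_reshape_fail(); treat the exact kernel body as an
 * assumption reconstructed from the lines this patch deletes. */
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_CLS_ACT
	/* Let an attached action reshape the packet; if it succeeds,
	 * the packet was not dropped, and the status must say so. */
	if (sch->reshape_fail && sch->reshape_fail(skb, sch) == 0)
		return NET_XMIT_SUCCESS;
#endif
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}
```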