-rw-r--r--   net/sched/sch_netem.c   26
1 file changed, 14 insertions, 12 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 956ff3da81f4..b17f2ed970e2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -439,8 +439,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         struct netem_skb_cb *cb;
         struct sk_buff *skb2;
         struct sk_buff *segs = NULL;
-        unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
-        int nb = 0;
+        unsigned int prev_len = qdisc_pkt_len(skb);
         int count = 1;
         int rc = NET_XMIT_SUCCESS;
         int rc_drop = NET_XMIT_DROP;
@@ -494,16 +493,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
          */
         if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
                 if (skb_is_gso(skb)) {
-                        segs = netem_segment(skb, sch, to_free);
-                        if (!segs)
+                        skb = netem_segment(skb, sch, to_free);
+                        if (!skb)
                                 return rc_drop;
-                } else {
-                        segs = skb;
+                        segs = skb->next;
+                        skb_mark_not_on_list(skb);
+                        qdisc_skb_cb(skb)->pkt_len = skb->len;
                 }
 
-                skb = segs;
-                segs = segs->next;
-
                 skb = skb_unshare(skb, GFP_ATOMIC);
                 if (unlikely(!skb)) {
                         qdisc_qstats_drop(sch);
@@ -520,6 +517,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         }
 
         if (unlikely(sch->q.qlen >= sch->limit)) {
+                /* re-link segs, so that qdisc_drop_all() frees them all */
+                skb->next = segs;
                 qdisc_drop_all(skb, sch, to_free);
                 return rc_drop;
         }
@@ -593,6 +592,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 finish_segs:
         if (segs) {
+                unsigned int len, last_len;
+                int nb = 0;
+
+                len = skb->len;
+
                 while (segs) {
                         skb2 = segs->next;
                         skb_mark_not_on_list(segs);
@@ -608,9 +612,7 @@ finish_segs:
                         }
                         segs = skb2;
                 }
-                sch->q.qlen += nb;
-                if (nb > 1)
-                        qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+                qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
         }
         return NET_XMIT_SUCCESS;
 }
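
Read together, the hunks move the GSO bookkeeping into the finish_segs block: nb and len are now computed there, and the old sch->q.qlen += nb / qdisc_tree_reduce_backlog(sch, 1 - nb, ...) pair is replaced by a single qdisc_tree_reduce_backlog(sch, -nb, prev_len - len) call. Since qdisc_tree_reduce_backlog() subtracts its arguments from the ancestor qdiscs' queue-length and backlog counters, passing negative deltas adds the extra packets and bytes produced by segmentation to the parents' totals. The sketch below is a minimal user-space model of that arithmetic only; the struct, the helper, and all numbers are hypothetical stand-ins, not the kernel's API (the real helper walks the qdisc hierarchy and notifies classes).

/*
 * Minimal user-space sketch (not kernel code) of the accounting idea in the
 * last hunk.  Everything here is a simplified stand-in that reproduces only
 * the subtract arithmetic of qdisc_tree_reduce_backlog().
 */
#include <stdio.h>

struct counters {
        int qlen;     /* packets the parent qdiscs think are queued */
        int backlog;  /* bytes the parent qdiscs think are queued   */
};

/* Stand-in for qdisc_tree_reduce_backlog(sch, n, len): it subtracts, so
 * passing negative values increases the counters. */
static void tree_reduce_backlog(struct counters *c, int n, int len)
{
        c->qlen -= n;
        c->backlog -= len;
}

int main(void)
{
        /* Parents charged one packet of prev_len bytes for the original GSO skb. */
        int prev_len = 3000;            /* hypothetical qdisc_pkt_len(skb) */
        struct counters c = { .qlen = 1, .backlog = prev_len };

        /* After segmentation, three 1040-byte segments are actually queued:
         * the first one ("skb") plus nb = 2 extras from the segs list. */
        int nb = 2;
        int len = 3 * 1040;             /* skb->len plus enqueued segment lengths */

        /* Same call shape as the patch: charge the surplus packets and bytes. */
        tree_reduce_backlog(&c, -nb, prev_len - len);

        printf("qlen=%d backlog=%d\n", c.qlen, c.backlog);  /* qlen=3 backlog=3120 */
        return 0;
}

With these hypothetical numbers the parents end up seeing 3 packets and 3120 bytes, matching what is actually queued after segmentation.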
