author	Stephen Hemminger <shemminger@linux-foundation.org>	2007-03-22 15:17:05 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:27:21 -0400
commit	075aa573b74a732aeff487ab77d3fbd627c10856 (patch)
tree	ba787170f860875318a13e4c934dc02a0304a18f
parent	b407621c35ed5f9a0734e57472e9539117963768 (diff)
[NETEM]: Optimize tfifo
In most cases, the next packet will be sent after the last one. So optimize that case.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/sched/sch_netem.c	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 4ac6df0a5b35..7e9e658d4d93 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -479,22 +479,28 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
  */
 struct fifo_sched_data {
 	u32 limit;
+	psched_time_t oldest;
 };
 
 static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 	struct sk_buff_head *list = &sch->q;
-	const struct netem_skb_cb *ncb
-		= (const struct netem_skb_cb *)nskb->cb;
+	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
 	struct sk_buff *skb;
 
 	if (likely(skb_queue_len(list) < q->limit)) {
+		/* Optimize for add at tail */
+		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
+			q->oldest = tnext;
+			return qdisc_enqueue_tail(nskb, sch);
+		}
+
 		skb_queue_reverse_walk(list, skb) {
 			const struct netem_skb_cb *cb
 				= (const struct netem_skb_cb *)skb->cb;
 
-			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
+			if (!PSCHED_TLESS(tnext, cb->time_to_send))
 				break;
 		}
 
@@ -507,7 +513,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
 
-	return qdisc_drop(nskb, sch);
+	return qdisc_reshape_fail(nskb, sch);
 }
 
 static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
@@ -523,6 +529,7 @@ static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
 	} else
 		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
 
+	PSCHED_SET_PASTPERFECT(q->oldest);
 	return 0;
 }
 
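To illustrate the idea outside the kernel, here is a minimal user-space sketch of the same technique: keep the list sorted by time_to_send, remember the largest timestamp enqueued so far (the role q->oldest plays in the patch), and append at the tail without walking the list whenever the new element is not scheduled earlier than that. The names struct entry, struct tfifo, tfifo_insert and the field latest are hypothetical and exist only for this sketch; it is not the kernel code.

/* Hypothetical user-space model of the tfifo tail optimization. */
#include <stddef.h>
#include <stdio.h>

struct entry {
	long long time_to_send;		/* analogue of netem_skb_cb.time_to_send */
	struct entry *prev, *next;
};

struct tfifo {
	struct entry *head, *tail;
	long long latest;		/* largest time enqueued; "oldest" in the patch */
};

static void tfifo_insert(struct tfifo *q, struct entry *e)
{
	struct entry *pos;

	/* Fast path: empty queue, or e is not earlier than anything queued
	 * so far, so append at the tail without walking the list. */
	if (q->tail == NULL || e->time_to_send >= q->latest) {
		q->latest = e->time_to_send;
		e->prev = q->tail;
		e->next = NULL;
		if (q->tail)
			q->tail->next = e;
		else
			q->head = e;
		q->tail = e;
		return;
	}

	/* Slow path: walk backwards to the last entry that should still be
	 * sent before e, and insert e right after it (or at the head). */
	for (pos = q->tail; pos != NULL; pos = pos->prev)
		if (pos->time_to_send <= e->time_to_send)
			break;

	e->prev = pos;
	e->next = pos ? pos->next : q->head;
	if (e->next)
		e->next->prev = e;
	else
		q->tail = e;
	if (pos)
		pos->next = e;
	else
		q->head = e;
}

int main(void)
{
	struct tfifo q = { NULL, NULL, 0 };
	struct entry a = { 10, NULL, NULL }, b = { 30, NULL, NULL }, c = { 20, NULL, NULL };
	struct entry *it;

	tfifo_insert(&q, &a);	/* fast path: empty queue */
	tfifo_insert(&q, &b);	/* fast path: 30 >= 10 */
	tfifo_insert(&q, &c);	/* slow path: 20 < 30, lands between a and b */

	for (it = q.head; it; it = it->next)
		printf("%lld\n", it->time_to_send);	/* prints 10, 20, 30 */
	return 0;
}

In the patch itself the cached timestamp is q->oldest, updated only on the tail fast path, and tfifo_init() gives it a defined starting value with PSCHED_SET_PASTPERFECT() before the first enqueue.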