author		Thomas Graf <tgraf@suug.ch>	2005-06-19 01:58:15 -0400
committer	David S. Miller <davem@davemloft.net>	2005-06-19 01:58:15 -0400
commit		821d24ae741f83ef0754a98b4b8aef7231856543 (patch)
tree		74c3241b1caf60309840fdc8a976e2d0713d1052 /net/sched/sch_generic.c
parent		6fc8e84f4cf8d623f98aebfd6996dc3848bcf964 (diff)
[PKT_SCHED]: Transform pfifo_fast to use generic queue management interface
Gives pfifo_fast a byte-based backlog.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
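
For context, the generic queue management helpers this patch switches to (__qdisc_enqueue_tail(), qdisc_drop(), and friends introduced by the parent patch) centralize the statistics bookkeeping that pfifo_fast previously did by hand, and add the byte-based qstats.backlog counter. The following is only a rough sketch of the enqueue-side helpers, inferred from the accounting this diff removes from pfifo_fast; the authoritative definitions live in include/net/sch_generic.h.

/* Sketch only: approximates the helpers added by the parent patch;
 * consult include/net/sch_generic.h for the real definitions. */
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;	/* byte-based backlog */
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return NET_XMIT_SUCCESS;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}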
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	23
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 87e48a4e1051..03cf001adb41 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -318,16 +318,12 @@ pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 	list += prio2band[skb->priority&TC_PRIO_MAX];
 
-	if (list->qlen < qdisc->dev->tx_queue_len) {
-		__skb_queue_tail(list, skb);
+	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
 		qdisc->q.qlen++;
-		qdisc->bstats.bytes += skb->len;
-		qdisc->bstats.packets++;
-		return 0;
+		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
-	qdisc->qstats.drops++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
+
+	return qdisc_drop(skb, qdisc);
 }
 
 static struct sk_buff *
@@ -335,10 +331,9 @@ pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
 	int prio;
 	struct sk_buff_head *list = qdisc_priv(qdisc);
-	struct sk_buff *skb;
 
 	for (prio = 0; prio < 3; prio++, list++) {
-		skb = __skb_dequeue(list);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
 		if (skb) {
 			qdisc->q.qlen--;
 			return skb;
@@ -354,10 +349,8 @@ pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 	list += prio2band[skb->priority&TC_PRIO_MAX];
 
-	__skb_queue_head(list, skb);
 	qdisc->q.qlen++;
-	qdisc->qstats.requeues++;
-	return 0;
+	return __qdisc_requeue(skb, qdisc, list);
 }
 
 static void
@@ -367,7 +360,9 @@ pfifo_fast_reset(struct Qdisc* qdisc)
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio=0; prio < 3; prio++)
-		skb_queue_purge(list+prio);
+		__qdisc_reset_queue(qdisc, list + prio);
+
+	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
 
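
The dequeue, requeue, and reset paths follow the same pattern. Note that the reset hunk still clears qdisc->qstats.backlog explicitly, which suggests __qdisc_reset_queue() only purges the skb list and leaves the byte accounting to the caller. A rough sketch of those helpers under that assumption (again, not the authoritative definitions):

/* Sketch only: assumed shape of the remaining helpers used above. */
static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (skb != NULL)
		sch->qstats.backlog -= skb->len;	/* keep byte count in sync */
	return skb;
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/* Purges without walking per-skb lengths; the caller
	 * (here pfifo_fast_reset) zeroes qstats.backlog itself. */
	skb_queue_purge(list);
}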