-rw-r--r--	include/linux/skbuff.h	38
-rw-r--r--	net/sched/sch_netem.c	85
2 files changed, 114 insertions, 9 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4286d832166f..fdfb8fe8c38c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -603,23 +603,23 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
  */
 
 /**
- *	__skb_queue_head - queue a buffer at the list head
+ *	__skb_queue_after - queue a buffer after another buffer in the list
  *	@list: list to use
+ *	@prev: place after this buffer
  *	@newsk: buffer to queue
  *
- *	Queue a buffer at the start of a list. This function takes no locks
+ *	Queue a buffer in the middle of a list. This function takes no locks
  *	and you must therefore hold required locks before calling it.
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
-static inline void __skb_queue_head(struct sk_buff_head *list,
-				    struct sk_buff *newsk)
+static inline void __skb_queue_after(struct sk_buff_head *list,
+				     struct sk_buff *prev,
+				     struct sk_buff *newsk)
 {
-	struct sk_buff *prev, *next;
-
+	struct sk_buff *next;
 	list->qlen++;
-	prev = (struct sk_buff *)list;
+
 	next = prev->next;
 	newsk->next = next;
 	newsk->prev = prev;
@@ -627,6 +627,23 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 }
 
 /**
+ *	__skb_queue_head - queue a buffer at the list head
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the start of a list. This function takes no locks
+ *	and you must therefore hold required locks before calling it.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+				    struct sk_buff *newsk)
+{
+	__skb_queue_after(list, (struct sk_buff *)list, newsk);
+}
+
+/**
  *	__skb_queue_tail - queue a buffer at the list tail
  *	@list: list to use
  *	@newsk: buffer to queue
@@ -1203,6 +1220,11 @@ static inline void kunmap_skb_frag(void *vaddr)
 		prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
 		skb = skb->next)
 
+#define skb_queue_reverse_walk(queue, skb) \
+		for (skb = (queue)->prev;					\
+		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
+		     skb = skb->prev)
+
 
 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 					 int noblock, int *err);
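The two helpers added above are the generic list operations behind the netem change below: the sk_buff list is a circular doubly linked list whose head acts as a sentinel, so __skb_queue_head() reduces to "insert after the sentinel" and skb_queue_reverse_walk() is simply the tail-to-head traversal. A minimal standalone sketch of that behaviour in userspace C; the struct and function names here are illustrative stand-ins, not the kernel API:

#include <stdio.h>

/* Illustrative stand-ins for sk_buff / sk_buff_head; in the kernel the
 * list head itself is cast to struct sk_buff * and used as the sentinel. */
struct node { struct node *next, *prev; int val; };
struct list { struct node head; unsigned int qlen; };

static void list_init(struct list *l)
{
	l->head.next = l->head.prev = &l->head;	/* empty list points at itself */
	l->qlen = 0;
}

/* Same linking steps as __skb_queue_after(): splice newn in right after prev. */
static void insert_after(struct list *l, struct node *prev, struct node *newn)
{
	struct node *next = prev->next;

	l->qlen++;
	newn->next = next;
	newn->prev = prev;
	next->prev = prev->next = newn;
}

/* __skb_queue_head() reduces to "insert after the sentinel". */
static void insert_head(struct list *l, struct node *newn)
{
	insert_after(l, &l->head, newn);
}

/* Counterpart of skb_queue_reverse_walk(): tail to head until the sentinel. */
#define reverse_walk(l, n) \
	for ((n) = (l)->head.prev; (n) != &(l)->head; (n) = (n)->prev)

int main(void)
{
	struct list l;
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	struct node *n;

	list_init(&l);
	insert_head(&l, &a);		/* list: 1 */
	insert_after(&l, &a, &b);	/* list: 1 2 */
	insert_after(&l, &b, &c);	/* list: 1 2 3 */

	reverse_walk(&l, n)
		printf("%d ", n->val);	/* prints: 3 2 1 */
	printf("\n");
	return 0;
}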
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d871fe7f81a9..7c10ef3457d7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -300,11 +300,16 @@ static void netem_reset(struct Qdisc *sch)
 	del_timer_sync(&q->timer);
 }
 
+/* Pass size change message down to embedded FIFO */
 static int set_fifo_limit(struct Qdisc *q, int limit)
 {
 	struct rtattr *rta;
 	int ret = -ENOMEM;
 
+	/* Hack to avoid sending change message to non-FIFO */
+	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+		return 0;
+
 	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
 	if (rta) {
 		rta->rta_type = RTM_NEWQDISC;
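The strncmp() guard added above compares the qdisc id from its second character on, so every FIFO variant ("pfifo", "bfifo", and the "tfifo" added below) still receives the limit change while any other inner qdisc is left untouched. A quick standalone illustration of that string test (the id list here is just an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ids[] = { "pfifo", "bfifo", "tfifo", "netem", "tbf" };
	size_t i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%-5s -> %s\n", ids[i],
		       strncmp(ids[i] + 1, "fifo", 4) == 0 ?
		       "limit change passed down" : "skipped");
	return 0;
}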
@@ -436,6 +441,84 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 	return 0;
 }
 
+/*
+ * Special case version of FIFO queue for use by netem.
+ * It queues in order based on the timestamps in the skbs.
+ */
+struct fifo_sched_data {
+	u32 limit;
+};
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+	struct sk_buff_head *list = &sch->q;
+	const struct netem_skb_cb *ncb
+		= (const struct netem_skb_cb *)nskb->cb;
+	struct sk_buff *skb;
+
+	if (likely(skb_queue_len(list) < q->limit)) {
+		skb_queue_reverse_walk(list, skb) {
+			const struct netem_skb_cb *cb
+				= (const struct netem_skb_cb *)skb->cb;
+
+			if (PSCHED_TLESS(cb->time_to_send, ncb->time_to_send))
+				break;
+		}
+
+		__skb_queue_after(list, skb, nskb);
+
+		sch->qstats.backlog += nskb->len;
+		sch->bstats.bytes += nskb->len;
+		sch->bstats.packets++;
+
+		return NET_XMIT_SUCCESS;
+	}
+
+	return qdisc_drop(nskb, sch);
+}
+
+static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+
+	if (opt) {
+		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
+		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
+			return -EINVAL;
+
+		q->limit = ctl->limit;
+	} else
+		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+
+	return 0;
+}
+
+static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+	struct tc_fifo_qopt opt = { .limit = q->limit };
+
+	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	return skb->len;
+
+rtattr_failure:
+	return -1;
+}
+
+static struct Qdisc_ops tfifo_qdisc_ops = {
+	.id		=	"tfifo",
+	.priv_size	=	sizeof(struct fifo_sched_data),
+	.enqueue	=	tfifo_enqueue,
+	.dequeue	=	qdisc_dequeue_head,
+	.requeue	=	qdisc_requeue,
+	.drop		=	qdisc_queue_drop,
+	.init		=	tfifo_init,
+	.reset		=	qdisc_reset_queue,
+	.change		=	tfifo_init,
+	.dump		=	tfifo_dump,
+};
+
 static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -448,7 +531,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
 	if (!q->qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
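The tfifo_enqueue() added above keeps netem's inner queue sorted by the time_to_send stamp that netem stores in skb->cb: it walks the queue from the tail, stops at the first packet scheduled strictly earlier than the new one, and inserts right after it, so qdisc_dequeue_head() always returns the packet that is due next. A small userspace sketch of that insertion rule, with plain integers standing in for the PSCHED timestamps (illustrative only, not kernel code):

#include <stdio.h>

#define MAX_Q 16

/* Same rule as tfifo_enqueue(): scan from the tail and place the new
 * stamp right after the first entry that is strictly earlier than it
 * (PSCHED_TLESS in the patch).  q[] is kept ordered, earliest first. */
static void tfifo_insert(int q[], int *len, int stamp)
{
	int pos = *len;

	while (pos > 0 && q[pos - 1] >= stamp)
		pos--;				/* keep walking toward the head */

	for (int i = *len; i > pos; i--)	/* shift later entries back */
		q[i] = q[i - 1];
	q[pos] = stamp;
	(*len)++;
}

int main(void)
{
	int q[MAX_Q], len = 0;
	int arrivals[] = { 50, 20, 80, 20, 60 };	/* time_to_send values */

	for (int i = 0; i < 5; i++)
		tfifo_insert(q, &len, arrivals[i]);

	/* Head-to-tail (dequeue) order is now: 20 20 50 60 80.
	 * A stamp equal to ones already queued lands ahead of them,
	 * because the walk only stops at a strictly earlier stamp. */
	for (int i = 0; i < len; i++)
		printf("%d ", q[i]);
	printf("\n");
	return 0;
}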