diff options
author | Florian Westphal <fw@strlen.de> | 2016-09-17 18:57:34 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-09-19 01:47:18 -0400 |
commit | 48da34b7a74201f15315cb1fc40bb9a7bd2b4940 (patch) | |
tree | 123595224534ee49f1175b6ef8371728b6d31538 /include/net/sch_generic.h | |
parent | ed760cb8aae7c2b84c193d4a7637b0c9e752f07e (diff) |
sched: add and use qdisc_skb_head helpers
This change replaces sk_buff_head struct in Qdiscs with new qdisc_skb_head.
It's similar to the sk_buff_head API, but does not use skb->prev pointers.
Qdiscs will commonly enqueue at the tail of a list and dequeue at head.
While sk_buff_head works fine for this, enqueue/dequeue also needs to
adjust the prev pointer of the next element.
The ->prev pointer is not required for qdiscs so we can just leave
it undefined and avoid one cacheline write access for en/dequeue.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r-- | include/net/sch_generic.h | 63 |
1 file changed, 51 insertions, 12 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 0741ed41575b..e6aa0a249672 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -36,6 +36,14 @@ struct qdisc_size_table { | |||
36 | u16 data[]; | 36 | u16 data[]; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | /* similar to sk_buff_head, but skb->prev pointer is undefined. */ | ||
40 | struct qdisc_skb_head { | ||
41 | struct sk_buff *head; | ||
42 | struct sk_buff *tail; | ||
43 | __u32 qlen; | ||
44 | spinlock_t lock; | ||
45 | }; | ||
46 | |||
39 | struct Qdisc { | 47 | struct Qdisc { |
40 | int (*enqueue)(struct sk_buff *skb, | 48 | int (*enqueue)(struct sk_buff *skb, |
41 | struct Qdisc *sch, | 49 | struct Qdisc *sch, |
@@ -76,7 +84,7 @@ struct Qdisc { | |||
76 | * For performance sake on SMP, we put highly modified fields at the end | 84 | * For performance sake on SMP, we put highly modified fields at the end |
77 | */ | 85 | */ |
78 | struct sk_buff *gso_skb ____cacheline_aligned_in_smp; | 86 | struct sk_buff *gso_skb ____cacheline_aligned_in_smp; |
79 | struct sk_buff_head q; | 87 | struct qdisc_skb_head q; |
80 | struct gnet_stats_basic_packed bstats; | 88 | struct gnet_stats_basic_packed bstats; |
81 | seqcount_t running; | 89 | seqcount_t running; |
82 | struct gnet_stats_queue qstats; | 90 | struct gnet_stats_queue qstats; |
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch) | |||
600 | sch->qstats.overlimits++; | 608 | sch->qstats.overlimits++; |
601 | } | 609 | } |
602 | 610 | ||
611 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) | ||
612 | { | ||
613 | qh->head = NULL; | ||
614 | qh->tail = NULL; | ||
615 | qh->qlen = 0; | ||
616 | } | ||
617 | |||
603 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, | 618 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, |
604 | struct sk_buff_head *list) | 619 | struct qdisc_skb_head *qh) |
605 | { | 620 | { |
606 | __skb_queue_tail(list, skb); | 621 | struct sk_buff *last = qh->tail; |
622 | |||
623 | if (last) { | ||
624 | skb->next = NULL; | ||
625 | last->next = skb; | ||
626 | qh->tail = skb; | ||
627 | } else { | ||
628 | qh->tail = skb; | ||
629 | qh->head = skb; | ||
630 | } | ||
631 | qh->qlen++; | ||
607 | qdisc_qstats_backlog_inc(sch, skb); | 632 | qdisc_qstats_backlog_inc(sch, skb); |
608 | 633 | ||
609 | return NET_XMIT_SUCCESS; | 634 | return NET_XMIT_SUCCESS; |
@@ -614,9 +639,17 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) | |||
614 | return __qdisc_enqueue_tail(skb, sch, &sch->q); | 639 | return __qdisc_enqueue_tail(skb, sch, &sch->q); |
615 | } | 640 | } |
616 | 641 | ||
617 | static inline struct sk_buff *__qdisc_dequeue_head(struct sk_buff_head *list) | 642 | static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) |
618 | { | 643 | { |
619 | struct sk_buff *skb = __skb_dequeue(list); | 644 | struct sk_buff *skb = qh->head; |
645 | |||
646 | if (likely(skb != NULL)) { | ||
647 | qh->head = skb->next; | ||
648 | qh->qlen--; | ||
649 | if (qh->head == NULL) | ||
650 | qh->tail = NULL; | ||
651 | skb->next = NULL; | ||
652 | } | ||
620 | 653 | ||
621 | return skb; | 654 | return skb; |
622 | } | 655 | } |
@@ -643,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free) | |||
643 | } | 676 | } |
644 | 677 | ||
645 | static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, | 678 | static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, |
646 | struct sk_buff_head *list, | 679 | struct qdisc_skb_head *qh, |
647 | struct sk_buff **to_free) | 680 | struct sk_buff **to_free) |
648 | { | 681 | { |
649 | struct sk_buff *skb = __skb_dequeue(list); | 682 | struct sk_buff *skb = __qdisc_dequeue_head(qh); |
650 | 683 | ||
651 | if (likely(skb != NULL)) { | 684 | if (likely(skb != NULL)) { |
652 | unsigned int len = qdisc_pkt_len(skb); | 685 | unsigned int len = qdisc_pkt_len(skb); |
@@ -667,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch, | |||
667 | 700 | ||
668 | static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) | 701 | static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) |
669 | { | 702 | { |
670 | return skb_peek(&sch->q); | 703 | const struct qdisc_skb_head *qh = &sch->q; |
704 | |||
705 | return qh->head; | ||
671 | } | 706 | } |
672 | 707 | ||
673 | /* generic pseudo peek method for non-work-conserving qdisc */ | 708 | /* generic pseudo peek method for non-work-conserving qdisc */ |
@@ -702,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) | |||
702 | return skb; | 737 | return skb; |
703 | } | 738 | } |
704 | 739 | ||
705 | static inline void __qdisc_reset_queue(struct sk_buff_head *list) | 740 | static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh) |
706 | { | 741 | { |
707 | /* | 742 | /* |
708 | * We do not know the backlog in bytes of this list, it | 743 | * We do not know the backlog in bytes of this list, it |
709 | * is up to the caller to correct it | 744 | * is up to the caller to correct it |
710 | */ | 745 | */ |
711 | if (!skb_queue_empty(list)) { | 746 | ASSERT_RTNL(); |
712 | rtnl_kfree_skbs(list->next, list->prev); | 747 | if (qh->qlen) { |
713 | __skb_queue_head_init(list); | 748 | rtnl_kfree_skbs(qh->head, qh->tail); |
749 | |||
750 | qh->head = NULL; | ||
751 | qh->tail = NULL; | ||
752 | qh->qlen = 0; | ||
714 | } | 753 | } |
715 | } | 754 | } |
716 | 755 | ||