about | summary | refs | log | tree | commit | diff | stats
path: root/include/net/sch_generic.h
diff options
context:
space:
mode:
author: Eric Dumazet <edumazet@google.com> — 2016-06-22 02:16:49 -0400
committer: David S. Miller <davem@davemloft.net> — 2016-06-25 12:19:35 -0400
commit520ac30f45519b0a82dd92117c181d1d6144677b (patch)
treea3189ae1ab3b5be6213716e42ddaec082578e774 /include/net/sch_generic.h
parent36195d869e4b1dbf81bafa3a31bb095a3c013bdd (diff)
net_sched: drop packets after root qdisc lock is released
Qdisc performance suffers when packets are dropped at enqueue() time because drops (kfree_skb()) are done while qdisc lock is held, delaying a dequeue() draining the queue. Nominal throughput can be reduced by 50 % when this happens, at a time we would like the dequeue() to proceed as fast as possible. Even FQ is vulnerable to this problem, while one of FQ goals was to provide some flow isolation. This patch adds a 'struct sk_buff **to_free' parameter to all qdisc->enqueue(), and in qdisc_drop() helper. I measured a performance increase of up to 12 %, but this patch is a prereq so that future batches in enqueue() can fly. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r-- include/net/sch_generic.h | 41
1 files changed, 30 insertions, 11 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4f7cee8344c4..04e84c07c94f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -37,8 +37,10 @@ struct qdisc_size_table {
37}; 37};
38 38
39struct Qdisc { 39struct Qdisc {
40 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); 40 int (*enqueue)(struct sk_buff *skb,
41 struct sk_buff * (*dequeue)(struct Qdisc *dev); 41 struct Qdisc *sch,
42 struct sk_buff **to_free);
43 struct sk_buff * (*dequeue)(struct Qdisc *sch);
42 unsigned int flags; 44 unsigned int flags;
43#define TCQ_F_BUILTIN 1 45#define TCQ_F_BUILTIN 1
44#define TCQ_F_INGRESS 2 46#define TCQ_F_INGRESS 2
@@ -160,7 +162,9 @@ struct Qdisc_ops {
160 char id[IFNAMSIZ]; 162 char id[IFNAMSIZ];
161 int priv_size; 163 int priv_size;
162 164
163 int (*enqueue)(struct sk_buff *, struct Qdisc *); 165 int (*enqueue)(struct sk_buff *skb,
166 struct Qdisc *sch,
167 struct sk_buff **to_free);
164 struct sk_buff * (*dequeue)(struct Qdisc *); 168 struct sk_buff * (*dequeue)(struct Qdisc *);
165 struct sk_buff * (*peek)(struct Qdisc *); 169 struct sk_buff * (*peek)(struct Qdisc *);
166 170
@@ -498,10 +502,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
498#endif 502#endif
499} 503}
500 504
501static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 505static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
506 struct sk_buff **to_free)
502{ 507{
503 qdisc_calculate_pkt_len(skb, sch); 508 qdisc_calculate_pkt_len(skb, sch);
504 return sch->enqueue(skb, sch); 509 return sch->enqueue(skb, sch, to_free);
505} 510}
506 511
507static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) 512static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -626,24 +631,36 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
626 return __qdisc_dequeue_head(sch, &sch->q); 631 return __qdisc_dequeue_head(sch, &sch->q);
627} 632}
628 633
634/* Instead of calling kfree_skb() while root qdisc lock is held,
635 * queue the skb for future freeing at end of __dev_xmit_skb()
636 */
637static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
638{
639 skb->next = *to_free;
640 *to_free = skb;
641}
642
629static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 643static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
630 struct sk_buff_head *list) 644 struct sk_buff_head *list,
645 struct sk_buff **to_free)
631{ 646{
632 struct sk_buff *skb = __skb_dequeue(list); 647 struct sk_buff *skb = __skb_dequeue(list);
633 648
634 if (likely(skb != NULL)) { 649 if (likely(skb != NULL)) {
635 unsigned int len = qdisc_pkt_len(skb); 650 unsigned int len = qdisc_pkt_len(skb);
651
636 qdisc_qstats_backlog_dec(sch, skb); 652 qdisc_qstats_backlog_dec(sch, skb);
637 kfree_skb(skb); 653 __qdisc_drop(skb, to_free);
638 return len; 654 return len;
639 } 655 }
640 656
641 return 0; 657 return 0;
642} 658}
643 659
644static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) 660static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
661 struct sk_buff **to_free)
645{ 662{
646 return __qdisc_queue_drop_head(sch, &sch->q); 663 return __qdisc_queue_drop_head(sch, &sch->q, to_free);
647} 664}
648 665
649static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) 666static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
@@ -724,9 +741,11 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
724 qdisc_qstats_drop(sch); 741 qdisc_qstats_drop(sch);
725} 742}
726 743
727static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) 744
745static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
746 struct sk_buff **to_free)
728{ 747{
729 kfree_skb(skb); 748 __qdisc_drop(skb, to_free);
730 qdisc_qstats_drop(sch); 749 qdisc_qstats_drop(sch);
731 750
732 return NET_XMIT_DROP; 751 return NET_XMIT_DROP;