 include/net/sch_generic.h | 16 ++++++++++++++++
 net/sched/sch_generic.c   | 40 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f12669819d1a..d17ed6fb2f70 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -7,6 +7,7 @@
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>
 #include <linux/percpu.h>
+#include <linux/dynamic_queue_limits.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
 
@@ -119,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
 	qdisc->__state &= ~__QDISC___STATE_RUNNING;
 }
 
+static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+{
+	return qdisc->flags & TCQ_F_ONETXQUEUE;
+}
+
+static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+	/* Non-BQL migrated drivers will return 0, too. */
+	return dql_avail(&txq->dql);
+#else
+	return 0;
+#endif
+}
+
 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
 {
 	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
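
The two helpers added above are the policy knobs for bulk dequeue: qdisc_may_bulk() restricts bulking to qdiscs flagged TCQ_F_ONETXQUEUE (one qdisc feeding exactly one TX queue), and qdisc_avail_bulklimit() turns the queue's BQL state into a byte budget via dql_avail(). As a rough illustration, here is a minimal userspace sketch of that budget arithmetic; struct dql_model and dql_model_avail() are hypothetical stand-ins for the kernel's struct dql and dql_avail(), not kernel code.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct dql: num_queued counts
 * bytes handed to the driver, num_completed counts bytes the hardware
 * has finished with, and limit is the BQL-estimated in-flight byte cap.
 */
struct dql_model {
	unsigned int num_queued;
	unsigned int num_completed;
	unsigned int limit;
};

/* Same arithmetic as dql_avail(): the remaining byte headroom before the
 * in-flight limit is reached. Goes negative once the limit is exceeded,
 * which is why a budget <= 0 stops bulk dequeue immediately.
 */
static int dql_model_avail(const struct dql_model *dql)
{
	return (int)(dql->limit - (dql->num_queued - dql->num_completed));
}

int main(void)
{
	struct dql_model dql = {
		.num_queued    = 90000,
		.num_completed = 60000,
		.limit         = 45000,
	};

	/* 45000 - (90000 - 60000) = 15000 bytes of headroom, so one qdisc
	 * run may bulk-dequeue roughly that many bytes worth of skbs. */
	printf("bulk byte budget: %d\n", dql_model_avail(&dql));
	return 0;
}

Per the comment in the hunk, drivers that were never migrated to BQL leave the dql state untouched, so the helper returns 0 for them as well, matching the !CONFIG_BQL fallback.
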
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7c8e5d73d433..797ebef73642 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -56,6 +56,35 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 	return 0;
 }
 
+static struct sk_buff *try_bulk_dequeue_skb(struct Qdisc *q,
+					    struct sk_buff *head_skb,
+					    int bytelimit)
+{
+	struct sk_buff *skb, *tail_skb = head_skb;
+
+	while (bytelimit > 0) {
+		skb = q->dequeue(q);
+		if (!skb)
+			break;
+
+		bytelimit -= skb->len; /* covers GSO len */
+		skb = validate_xmit_skb(skb, qdisc_dev(q));
+		if (!skb)
+			break;
+
+		while (tail_skb->next) /* GSO list goto tail */
+			tail_skb = tail_skb->next;
+
+		tail_skb->next = skb;
+		tail_skb = skb;
+	}
+
+	return head_skb;
+}
+
+/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
+ * A requeued skb (via q->gso_skb) can also be a SKB list.
+ */
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb = q->gso_skb;
@@ -70,10 +99,17 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 		} else
 			skb = NULL;
 	} else {
-		if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq)) {
+		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
+		    !netif_xmit_frozen_or_stopped(txq)) {
+			int bytelimit = qdisc_avail_bulklimit(txq);
+
 			skb = q->dequeue(q);
-			if (skb)
+			if (skb) {
+				bytelimit -= skb->len;
 				skb = validate_xmit_skb(skb, qdisc_dev(q));
+			}
+			if (skb && qdisc_may_bulk(q))
+				skb = try_bulk_dequeue_skb(q, skb, bytelimit);
 		}
 	}
 
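
Taken together, the dequeue_skb() change reads: dequeue and validate the head packet as before, charge its length against the BQL budget, and, when qdisc_may_bulk() allows it, let try_bulk_dequeue_skb() spend the remaining budget chaining further packets onto skb->next, so the whole batch reaches the driver in one transmit pass. Because validate_xmit_skb() may segment a GSO packet into a sublist, each append first walks to the current tail. Below is a self-contained userspace sketch of that list-splicing loop; struct pkt and bulk_dequeue() are hypothetical stand-ins for sk_buff and try_bulk_dequeue_skb(), modeling the control flow only (no validation, locking, or requeue handling).

#include <stdio.h>

/* Hypothetical stand-in for sk_buff: a byte length and a ->next link. */
struct pkt {
	int len;
	struct pkt *next;
};

/* Model of the bulk-dequeue loop: pull packets from the queue while the
 * byte budget lasts and splice each one onto the tail of the list headed
 * by head, mirroring try_bulk_dequeue_skb() above.
 */
static struct pkt *bulk_dequeue(struct pkt **queue, struct pkt *head,
				int bytelimit)
{
	struct pkt *p, *tail = head;

	while (bytelimit > 0 && *queue) {
		p = *queue;		/* models q->dequeue(q) */
		*queue = p->next;
		p->next = NULL;

		bytelimit -= p->len;	/* charge the full packet length */

		while (tail->next)	/* walk past any GSO sublist */
			tail = tail->next;
		tail->next = p;
		tail = p;
	}
	return head;
}

int main(void)
{
	struct pkt q3 = { 800, NULL };
	struct pkt q2 = { 1500, &q3 };
	struct pkt q1 = { 1500, &q2 };
	struct pkt head = { 1500, NULL };
	struct pkt *queue = &q1;

	/* 2000 bytes of budget left after the head packet: the loop takes
	 * q1 (2000 -> 500) and q2 (500 -> -1000), then stops. */
	struct pkt *p = bulk_dequeue(&queue, &head, 2000);

	for (; p; p = p->next)
		printf("xmit pkt of %d bytes\n", p->len);
	return 0;
}

As in the kernel loop, the budget is tested before each dequeue, so a batch may overshoot the limit by one packet; a non-positive budget only stops further dequeues. Run, the sketch pulls q1 and q2, prints three 1500-byte packets, and leaves q3 queued.
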