Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_generic.c	20
1 file changed, 13 insertions, 7 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 38d58e6cef07..6efca30894aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -57,7 +57,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
 				 struct sk_buff *skb,
-				 const struct netdev_queue *txq)
+				 const struct netdev_queue *txq,
+				 int *packets)
 {
 	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 
@@ -70,6 +71,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 		bytelimit -= nskb->len; /* covers GSO len */
 		skb->next = nskb;
 		skb = nskb;
+		(*packets)++; /* GSO counts as one pkt */
 	}
 	skb->next = NULL;
 }
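
The two hunks above make try_bulk_dequeue_skb() report how many packets it chained onto the bulk list: each extra skb pulled within the byte budget bumps *packets, and a GSO list still counts as a single packet. Below is a minimal userspace sketch of that accounting, not kernel code; struct pkt, queue_pop() and the byte budget value are hypothetical stand-ins for sk_buff, q->dequeue() and qdisc_avail_bulklimit().

#include <stdio.h>

struct pkt {
	int len;
	struct pkt *next;
};

static struct pkt *queue_pop(struct pkt **head)
{
	struct pkt *p = *head;

	if (p)
		*head = p->next;
	return p;
}

static void try_bulk_dequeue(struct pkt **queue, struct pkt *first,
			     int avail, int *packets)
{
	int bytelimit = avail - first->len;	/* as in the kernel loop */
	struct pkt *p = first;

	while (bytelimit > 0) {
		struct pkt *np = queue_pop(queue);

		if (!np)
			break;
		bytelimit -= np->len;	/* a GSO skb's len covers all segs */
		p->next = np;
		p = np;
		(*packets)++;		/* one count per chained packet */
	}
	p->next = NULL;
}

int main(void)
{
	struct pkt pkts[4] = {
		{ 1000, &pkts[1] }, { 1000, &pkts[2] },
		{ 1000, &pkts[3] }, { 1000, NULL },
	};
	struct pkt *queue = pkts;
	struct pkt *first = queue_pop(&queue);
	int packets = 1;	/* head packet, as dequeue_skb() pre-sets */

	try_bulk_dequeue(&queue, first, 2500, &packets);
	/* prints "bulked 3 packets": the limit is checked before each
	 * dequeue, so the last packet may overshoot the byte budget,
	 * mirroring the kernel loop */
	printf("bulked %d packets\n", packets);
	return 0;
}
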
@@ -77,11 +79,13 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
  * A requeued skb (via q->gso_skb) can also be a SKB list.
  */
-static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+				   int *packets)
 {
 	struct sk_buff *skb = q->gso_skb;
 	const struct netdev_queue *txq = q->dev_queue;
 
+	*packets = 1;
 	*validate = true;
 	if (unlikely(skb)) {
 		/* check the reason of requeuing without tx lock first */
@@ -98,7 +102,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
 		    !netif_xmit_frozen_or_stopped(txq)) {
 			skb = q->dequeue(q);
 			if (skb && qdisc_may_bulk(q))
-				try_bulk_dequeue_skb(q, skb, txq);
+				try_bulk_dequeue_skb(q, skb, txq, packets);
 		}
 	}
 	return skb;
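
dequeue_skb() now hands the packet count back through *packets, pre-set to 1 so the caller always accounts for at least the head skb, whether it came from the requeue path (q->gso_skb) or a plain dequeue where bulking was not possible; try_bulk_dequeue_skb() then adds one per chained packet. A small sketch of that contract, under the same hypothetical struct pkt model as above (requeue and validate handling omitted):

#include <assert.h>
#include <stddef.h>

struct pkt {
	int len;
	struct pkt *next;
};

static struct pkt *dequeue_model(struct pkt **queue, int *packets)
{
	struct pkt *p = *queue;

	*packets = 1;	/* head skb always counts, even without bulking */
	if (!p)
		return NULL;
	*queue = p->next;
	p->next = NULL;
	/* a bulking qdisc would chain more packets here, doing
	 * (*packets)++ once per extra packet */
	return p;
}

int main(void)
{
	struct pkt a = { 100, NULL };
	struct pkt *queue = &a;
	int packets;

	assert(dequeue_model(&queue, &packets) == &a && packets == 1);
	assert(dequeue_model(&queue, &packets) == NULL);
	return 0;
}
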
@@ -204,7 +208,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct Qdisc *q)
+static inline int qdisc_restart(struct Qdisc *q, int *packets)
 {
 	struct netdev_queue *txq;
 	struct net_device *dev;
@@ -213,7 +217,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	bool validate;
 
 	/* Dequeue packet */
-	skb = dequeue_skb(q, &validate);
+	skb = dequeue_skb(q, &validate, packets);
 	if (unlikely(!skb))
 		return 0;
 
@@ -227,14 +231,16 @@ static inline int qdisc_restart(struct Qdisc *q)
 void __qdisc_run(struct Qdisc *q)
 {
 	int quota = weight_p;
+	int packets;
 
-	while (qdisc_restart(q)) {
+	while (qdisc_restart(q, &packets)) {
 		/*
 		 * Ordered by possible occurrence: Postpone processing if
 		 * 1. we've exceeded packet quota
 		 * 2. another process needs the CPU;
 		 */
-		if (--quota <= 0 || need_resched()) {
+		quota -= packets;
+		if (quota <= 0 || need_resched()) {
 			__netif_schedule(q);
 			break;
 		}
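
This last hunk is the substance of the change: __qdisc_run() used to charge one unit of the weight_p quota per qdisc_restart() call, so a bulk dequeue of N packets cost the same quota as a single packet and a bulking qdisc could send far more than its fair share per scheduling round. Charging quota -= packets restores per-packet accounting. A sketch of the effect, assuming the default weight_p of 64 and a hypothetical restart_model() that bulks 8 packets per round (need_resched() omitted from the model):

#include <stdio.h>

static const int weight_p = 64;	/* kernel default dev weight */

/* Hypothetical stand-in for qdisc_restart(): pretend every round
 * bulk-dequeues and transmits 8 packets from a never-empty queue. */
static int restart_model(int *packets)
{
	*packets = 8;
	return 1;	/* queue not empty */
}

int main(void)
{
	int quota = weight_p;
	int packets, rounds = 0, sent = 0;

	while (restart_model(&packets)) {
		rounds++;
		sent += packets;
		quota -= packets;	/* patched accounting; old code: --quota */
		if (quota <= 0)
			break;
	}
	/* old per-call accounting would run 64 rounds (512 packets)
	 * before yielding; per-packet accounting prints rounds=8 sent=64 */
	printf("rounds=%d sent=%d\n", rounds, sent);
	return 0;
}
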