author		Patrick McHardy <kaber@trash.net>	2011-01-14 08:12:37 -0500
committer	Patrick McHardy <kaber@trash.net>	2011-01-14 08:12:37 -0500
commit		0134e89c7bcc9fde1da962c82a120691e185619f (patch)
tree		3e03335cf001019a2687d161e956de4f73379984 /net/sched
parent		c7066f70d9610df0b9406cc635fc09e86136e714 (diff)
parent		6faee60a4e82075853a437831768cc9e2e563e4e (diff)
Merge branch 'master' of git://1984.lsi.us.es/net-next-2.6
Conflicts:
        net/ipv4/route.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_fifo.c	  2
-rw-r--r--	net/sched/sch_generic.c	 41
-rw-r--r--	net/sched/sch_red.c	  1
-rw-r--r--	net/sched/sch_sfq.c	291
-rw-r--r--	net/sched/sch_teql.c	  3
5 files changed, 218 insertions, 120 deletions
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4dfecb0cba37..aa4d6337e43c 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -54,8 +54,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         /* queue full, remove one skb to fulfill the limit */
         skb_head = qdisc_dequeue_head(sch);
-        sch->bstats.bytes -= qdisc_pkt_len(skb_head);
-        sch->bstats.packets--;
         sch->qstats.drops++;
         kfree_skb(skb_head);
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..34dc598440a2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                 /* check the reason of requeuing without tx lock first */
                 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-                if (!netif_tx_queue_stopped(txq) &&
-                    !netif_tx_queue_frozen(txq)) {
+                if (!netif_tx_queue_frozen_or_stopped(txq)) {
                         q->gso_skb = NULL;
                         q->q.qlen--;
                 } else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
         spin_unlock(root_lock);
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+        if (!netif_tx_queue_frozen_or_stopped(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
 
         HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                 ret = dev_requeue_skb(skb, q);
         }
 
-        if (ret && (netif_tx_queue_stopped(txq) ||
-                    netif_tx_queue_frozen(txq)))
+        if (ret && netif_tx_queue_frozen_or_stopped(txq))
                 ret = 0;
 
         return ret;
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
         size = QDISC_ALIGN(sizeof(*sch));
         size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-        p = kzalloc(size, GFP_KERNEL);
+        p = kzalloc_node(size, GFP_KERNEL,
+                         netdev_queue_numa_node_read(dev_queue));
+
         if (!p)
                 goto errout;
         sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
         return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-        netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-        if (dev_ingress_queue(dev))
-                dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+        struct net_device *dev;
+
+        list_for_each_entry(dev, head, unreg_list) {
+                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+                                         &noop_qdisc);
+                if (dev_ingress_queue(dev))
+                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
+                                             &noop_qdisc);
 
-        dev_watchdog_down(dev);
+                dev_watchdog_down(dev);
+        }
 
         /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
         synchronize_rcu();
 
         /* Wait for outstanding qdisc_run calls. */
-        while (some_qdisc_is_busy(dev))
-                yield();
+        list_for_each_entry(dev, head, unreg_list)
+                while (some_qdisc_is_busy(dev))
+                        yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+        LIST_HEAD(single);
+
+        list_add(&dev->unreg_list, &single);
+        dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
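Note: the three sch_generic.c hunks above (and the sch_teql.c hunk at the end of this page) fold the former pair of checks, netif_tx_queue_stopped() plus netif_tx_queue_frozen(), into the single netif_tx_queue_frozen_or_stopped() helper pulled in from the merged branch. As a rough sketch of the idea only (the EX_* names and bit layout below are illustrative assumptions, not the kernel's actual definition), such a helper can test both conditions with one read of the per-queue state word:

/* Sketch only: a combined "frozen or stopped" test, assuming both
 * conditions live as bits in a single per-queue state word.
 * EX_* identifiers are hypothetical; the real helper is in netdevice.h. */
#define EX_QUEUE_STATE_XOFF     (1UL << 0)      /* assumed bit layout */
#define EX_QUEUE_STATE_FROZEN   (1UL << 1)

struct ex_tx_queue {
        unsigned long state;
};

static inline int ex_tx_queue_frozen_or_stopped(const struct ex_tx_queue *txq)
{
        /* one load and one mask instead of two separate bit tests */
        return txq->state & (EX_QUEUE_STATE_XOFF | EX_QUEUE_STATE_FROZEN);
}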
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8d42bb3ba540..a67ba3c5a0cc 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -239,6 +239,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
                 .Scell_log      = q->parms.Scell_log,
         };
 
+        sch->qstats.backlog = q->qdisc->qstats.backlog;
         opts = nla_nest_start(skb, TCA_OPTIONS);
         if (opts == NULL)
                 goto nla_put_failure;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3cf478d012dd..d54ac94066c2 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -67,27 +67,47 @@
 
         IMPLEMENTATION:
         This implementation limits maximal queue length to 128;
-        maximal mtu to 2^15-1; number of hash buckets to 1024.
+        max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
         The only goal of this restrictions was that all data
-        fit into one 4K page :-). Struct sfq_sched_data is
-        organized in anti-cache manner: all the data for a bucket
-        are scattered over different locations. This is not good,
-        but it allowed me to put it into 4K.
+        fit into one 4K page on 32bit arches.
 
         It is easy to increase these values, but not in flight.  */
 
-#define SFQ_DEPTH               128
+#define SFQ_DEPTH               128 /* max number of packets per flow */
+#define SFQ_SLOTS               128 /* max number of flows */
+#define SFQ_EMPTY_SLOT          255
 #define SFQ_HASH_DIVISOR        1024
+/* We use 16 bits to store allot, and want to handle packets up to 64K
+ * Scale allot by 8 (1<<3) so that no overflow occurs.
+ */
+#define SFQ_ALLOT_SHIFT         3
+#define SFQ_ALLOT_SIZE(X)       DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
 
-/* This type should contain at least SFQ_DEPTH*2 values */
+/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
 typedef unsigned char sfq_index;
 
+/*
+ * We dont use pointers to save space.
+ * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
+ * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
+ * are 'pointers' to dep[] array
+ */
 struct sfq_head
 {
         sfq_index       next;
         sfq_index       prev;
 };
 
+struct sfq_slot {
+        struct sk_buff  *skblist_next;
+        struct sk_buff  *skblist_prev;
+        sfq_index       qlen; /* number of skbs in skblist */
+        sfq_index       next; /* next slot in sfq chain */
+        struct sfq_head dep; /* anchor in dep[] chains */
+        unsigned short  hash; /* hash value (index in ht[]) */
+        short           allot; /* credit for this slot */
+};
+
 struct sfq_sched_data
 {
 /* Parameters */
@@ -99,17 +119,24 @@ struct sfq_sched_data
         struct tcf_proto *filter_list;
         struct timer_list perturb_timer;
         u32             perturbation;
-        sfq_index       tail;           /* Index of current slot in round */
-        sfq_index       max_depth;      /* Maximal depth */
-
+        sfq_index       cur_depth;      /* depth of longest slot */
+        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+        struct sfq_slot *tail;          /* current slot in round */
         sfq_index       ht[SFQ_HASH_DIVISOR];   /* Hash table */
-        sfq_index       next[SFQ_DEPTH];        /* Active slots link */
-        short           allot[SFQ_DEPTH];       /* Current allotment per slot */
-        unsigned short  hash[SFQ_DEPTH];        /* Hash value indexed by slots */
-        struct sk_buff_head     qs[SFQ_DEPTH];  /* Slot queue */
-        struct sfq_head dep[SFQ_DEPTH*2];       /* Linked list of slots, indexed by depth */
+        struct sfq_slot slots[SFQ_SLOTS];
+        struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
 };
 
+/*
+ * sfq_head are either in a sfq_slot or in dep[] array
+ */
+static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
+{
+        if (val < SFQ_SLOTS)
+                return &q->slots[val].dep;
+        return &q->dep[val - SFQ_SLOTS];
+}
+
 static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
         return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
@@ -200,30 +227,41 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
         return 0;
 }
 
+/*
+ * x : slot number [0 .. SFQ_SLOTS - 1]
+ */
 static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
 {
         sfq_index p, n;
-        int d = q->qs[x].qlen + SFQ_DEPTH;
+        int qlen = q->slots[x].qlen;
+
+        p = qlen + SFQ_SLOTS;
+        n = q->dep[qlen].next;
 
-        p = d;
-        n = q->dep[d].next;
-        q->dep[x].next = n;
-        q->dep[x].prev = p;
-        q->dep[p].next = q->dep[n].prev = x;
+        q->slots[x].dep.next = n;
+        q->slots[x].dep.prev = p;
+
+        q->dep[qlen].next = x;          /* sfq_dep_head(q, p)->next = x */
+        sfq_dep_head(q, n)->prev = x;
 }
 
+#define sfq_unlink(q, x, n, p)                  \
+        n = q->slots[x].dep.next;               \
+        p = q->slots[x].dep.prev;               \
+        sfq_dep_head(q, p)->next = n;           \
+        sfq_dep_head(q, n)->prev = p
+
+
 static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
 {
         sfq_index p, n;
+        int d;
 
-        n = q->dep[x].next;
-        p = q->dep[x].prev;
-        q->dep[p].next = n;
-        q->dep[n].prev = p;
-
-        if (n == p && q->max_depth == q->qs[x].qlen + 1)
-                q->max_depth--;
+        sfq_unlink(q, x, n, p);
 
+        d = q->slots[x].qlen--;
+        if (n == p && q->cur_depth == d)
+                q->cur_depth--;
         sfq_link(q, x);
 }
 
@@ -232,34 +270,74 @@ static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
         sfq_index p, n;
         int d;
 
-        n = q->dep[x].next;
-        p = q->dep[x].prev;
-        q->dep[p].next = n;
-        q->dep[n].prev = p;
-        d = q->qs[x].qlen;
-        if (q->max_depth < d)
-                q->max_depth = d;
+        sfq_unlink(q, x, n, p);
 
+        d = ++q->slots[x].qlen;
+        if (q->cur_depth < d)
+                q->cur_depth = d;
         sfq_link(q, x);
 }
 
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from tail of slot queue */
+static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
+{
+        struct sk_buff *skb = slot->skblist_prev;
+
+        slot->skblist_prev = skb->prev;
+        skb->prev->next = (struct sk_buff *)slot;
+        skb->next = skb->prev = NULL;
+        return skb;
+}
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
+{
+        struct sk_buff *skb = slot->skblist_next;
+
+        slot->skblist_next = skb->next;
+        skb->next->prev = (struct sk_buff *)slot;
+        skb->next = skb->prev = NULL;
+        return skb;
+}
+
+static inline void slot_queue_init(struct sfq_slot *slot)
+{
+        slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
+}
+
+/* add skb to slot queue (tail add) */
+static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
+{
+        skb->prev = slot->skblist_prev;
+        skb->next = (struct sk_buff *)slot;
+        slot->skblist_prev->next = skb;
+        slot->skblist_prev = skb;
+}
+
+#define slot_queue_walk(slot, skb)              \
+        for (skb = slot->skblist_next;          \
+             skb != (struct sk_buff *)slot;     \
+             skb = skb->next)
+
 static unsigned int sfq_drop(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index d = q->max_depth;
+        sfq_index x, d = q->cur_depth;
         struct sk_buff *skb;
         unsigned int len;
+        struct sfq_slot *slot;
 
-        /* Queue is full! Find the longest slot and
-           drop a packet from it */
-
+        /* Queue is full! Find the longest slot and drop tail packet from it */
         if (d > 1) {
-                sfq_index x = q->dep[d + SFQ_DEPTH].next;
-                skb = q->qs[x].prev;
+                x = q->dep[d].next;
+                slot = &q->slots[x];
+drop:
+                skb = slot_dequeue_tail(slot);
                 len = qdisc_pkt_len(skb);
-                __skb_unlink(skb, &q->qs[x]);
-                kfree_skb(skb);
                 sfq_dec(q, x);
+                kfree_skb(skb);
                 sch->q.qlen--;
                 sch->qstats.drops++;
                 sch->qstats.backlog -= len;
@@ -268,19 +346,11 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 
         if (d == 1) {
                 /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
-                d = q->next[q->tail];
-                q->next[q->tail] = q->next[d];
-                q->allot[q->next[d]] += q->quantum;
-                skb = q->qs[d].prev;
-                len = qdisc_pkt_len(skb);
-                __skb_unlink(skb, &q->qs[d]);
-                kfree_skb(skb);
-                sfq_dec(q, d);
-                sch->q.qlen--;
-                q->ht[q->hash[d]] = SFQ_DEPTH;
-                sch->qstats.drops++;
-                sch->qstats.backlog -= len;
-                return len;
+                x = q->tail->next;
+                slot = &q->slots[x];
+                q->tail->next = slot->next;
+                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+                goto drop;
         }
 
         return 0;
@@ -292,6 +362,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         struct sfq_sched_data *q = qdisc_priv(sch);
         unsigned int hash;
         sfq_index x;
+        struct sfq_slot *slot;
         int uninitialized_var(ret);
 
         hash = sfq_classify(skb, sch, &ret);
@@ -304,31 +375,32 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         hash--;
 
         x = q->ht[hash];
-        if (x == SFQ_DEPTH) {
-                q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-                q->hash[x] = hash;
+        slot = &q->slots[x];
+        if (x == SFQ_EMPTY_SLOT) {
+                x = q->dep[0].next; /* get a free slot */
+                q->ht[hash] = x;
+                slot = &q->slots[x];
+                slot->hash = hash;
         }
 
-        /* If selected queue has length q->limit, this means that
-         * all another queues are empty and that we do simple tail drop,
+        /* If selected queue has length q->limit, do simple tail drop,
          * i.e. drop _this_ packet.
          */
-        if (q->qs[x].qlen >= q->limit)
+        if (slot->qlen >= q->limit)
                 return qdisc_drop(skb, sch);
 
         sch->qstats.backlog += qdisc_pkt_len(skb);
-        __skb_queue_tail(&q->qs[x], skb);
+        slot_queue_add(slot, skb);
         sfq_inc(q, x);
-        if (q->qs[x].qlen == 1) {               /* The flow is new */
-                if (q->tail == SFQ_DEPTH) {     /* It is the first flow */
-                        q->tail = x;
-                        q->next[x] = x;
-                        q->allot[x] = q->quantum;
+        if (slot->qlen == 1) {          /* The flow is new */
+                if (q->tail == NULL) {  /* It is the first flow */
+                        slot->next = x;
                 } else {
-                        q->next[x] = q->next[q->tail];
-                        q->next[q->tail] = x;
-                        q->tail = x;
+                        slot->next = q->tail->next;
+                        q->tail->next = x;
                 }
+                q->tail = slot;
+                slot->allot = q->scaled_quantum;
         }
         if (++sch->q.qlen <= q->limit) {
                 sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -344,14 +416,12 @@ static struct sk_buff *
 sfq_peek(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index a;
 
         /* No active slots */
-        if (q->tail == SFQ_DEPTH)
+        if (q->tail == NULL)
                 return NULL;
 
-        a = q->next[q->tail];
-        return skb_peek(&q->qs[a]);
+        return q->slots[q->tail->next].skblist_next;
 }
 
 static struct sk_buff *
@@ -359,34 +429,37 @@ sfq_dequeue(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
-        sfq_index a, old_a;
+        sfq_index a, next_a;
+        struct sfq_slot *slot;
 
         /* No active slots */
-        if (q->tail == SFQ_DEPTH)
+        if (q->tail == NULL)
                 return NULL;
 
-        a = old_a = q->next[q->tail];
-
-        /* Grab packet */
-        skb = __skb_dequeue(&q->qs[a]);
+next_slot:
+        a = q->tail->next;
+        slot = &q->slots[a];
+        if (slot->allot <= 0) {
+                q->tail = slot;
+                slot->allot += q->scaled_quantum;
+                goto next_slot;
+        }
+        skb = slot_dequeue_head(slot);
         sfq_dec(q, a);
         sch->q.qlen--;
         sch->qstats.backlog -= qdisc_pkt_len(skb);
 
         /* Is the slot empty? */
-        if (q->qs[a].qlen == 0) {
-                q->ht[q->hash[a]] = SFQ_DEPTH;
-                a = q->next[a];
-                if (a == old_a) {
-                        q->tail = SFQ_DEPTH;
+        if (slot->qlen == 0) {
+                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+                next_a = slot->next;
+                if (a == next_a) {
+                        q->tail = NULL; /* no more active slots */
                         return skb;
                 }
-                q->next[q->tail] = a;
-                q->allot[a] += q->quantum;
-        } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
-                q->tail = a;
-                a = q->next[a];
-                q->allot[a] += q->quantum;
+                q->tail->next = next_a;
+        } else {
+                slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
         }
         return skb;
 }
@@ -422,6 +495,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 
         sch_tree_lock(sch);
         q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
+        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
         q->perturb_period = ctl->perturb_period * HZ;
         if (ctl->limit)
                 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -450,19 +524,19 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
         init_timer_deferrable(&q->perturb_timer);
 
         for (i = 0; i < SFQ_HASH_DIVISOR; i++)
-                q->ht[i] = SFQ_DEPTH;
+                q->ht[i] = SFQ_EMPTY_SLOT;
 
         for (i = 0; i < SFQ_DEPTH; i++) {
-                skb_queue_head_init(&q->qs[i]);
-                q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
-                q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
+                q->dep[i].next = i + SFQ_SLOTS;
+                q->dep[i].prev = i + SFQ_SLOTS;
         }
 
         q->limit = SFQ_DEPTH - 1;
-        q->max_depth = 0;
-        q->tail = SFQ_DEPTH;
+        q->cur_depth = 0;
+        q->tail = NULL;
         if (opt == NULL) {
                 q->quantum = psched_mtu(qdisc_dev(sch));
+                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
                 q->perturb_period = 0;
                 q->perturbation = net_random();
         } else {
@@ -471,8 +545,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
                         return err;
         }
 
-        for (i = 0; i < SFQ_DEPTH; i++)
+        for (i = 0; i < SFQ_SLOTS; i++) {
+                slot_queue_init(&q->slots[i]);
                 sfq_link(q, i);
+        }
         return 0;
 }
 
@@ -547,10 +623,19 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                            struct gnet_dump *d)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index idx = q->ht[cl-1];
-        struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
-        struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };
+        sfq_index idx = q->ht[cl - 1];
+        struct gnet_stats_queue qs = { 0 };
+        struct tc_sfq_xstats xstats = { 0 };
+        struct sk_buff *skb;
+
+        if (idx != SFQ_EMPTY_SLOT) {
+                const struct sfq_slot *slot = &q->slots[idx];
 
+                xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+                qs.qlen = slot->qlen;
+                slot_queue_walk(slot, skb)
+                        qs.backlog += qdisc_pkt_len(skb);
+        }
         if (gnet_stats_copy_queue(d, &qs) < 0)
                 return -1;
         return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -565,7 +650,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                 return;
 
         for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
-                if (q->ht[i] == SFQ_DEPTH ||
+                if (q->ht[i] == SFQ_EMPTY_SLOT ||
                     arg->count < arg->skip) {
                         arg->count++;
                         continue;
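The SFQ_ALLOT_SHIFT/SFQ_ALLOT_SIZE scaling introduced in the sch_sfq.c hunks above keeps the 16-bit allot field of struct sfq_slot from overflowing when charging packets of up to 64K bytes. The arithmetic can be checked in isolation; the snippet below is a standalone illustration only (the 1514-byte quantum is just a typical Ethernet value assumed for the example, not something set by this patch):

#include <stdio.h>

/* same macros as the patch, spelled out without kernel headers */
#define SFQ_ALLOT_SHIFT         3
#define SFQ_ALLOT_SIZE(X)       (((X) + (1 << SFQ_ALLOT_SHIFT) - 1) >> SFQ_ALLOT_SHIFT)

int main(void)
{
        unsigned int quantum = 1514;    /* assumed: typical Ethernet quantum */
        unsigned int big_pkt = 65536;   /* largest packet the comment wants to handle */

        /* 1514 bytes -> 190 scaled units, 65536 bytes -> 8192 scaled units;
         * both fit comfortably in the signed 16-bit 'allot' of struct sfq_slot */
        printf("scaled quantum    = %u\n", SFQ_ALLOT_SIZE(quantum));
        printf("scaled 64K packet = %u\n", SFQ_ALLOT_SIZE(big_pkt));
        return 0;
}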
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af9596709..106479a7c94a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ restart:
                 if (__netif_tx_trylock(slave_txq)) {
                         unsigned int length = qdisc_pkt_len(skb);
 
-                        if (!netif_tx_queue_stopped(slave_txq) &&
-                            !netif_tx_queue_frozen(slave_txq) &&
+                        if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
                             slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                 txq_trans_update(slave_txq);
                                 __netif_tx_unlock(slave_txq);