path: root/net/sched
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/Kconfig        |   2
-rw-r--r--  net/sched/act_csum.c     |   3
-rw-r--r--  net/sched/act_ipt.c      |   3
-rw-r--r--  net/sched/act_mirred.c   |   3
-rw-r--r--  net/sched/act_nat.c      |   3
-rw-r--r--  net/sched/act_pedit.c    |   3
-rw-r--r--  net/sched/act_police.c   |   3
-rw-r--r--  net/sched/act_simple.c   |   3
-rw-r--r--  net/sched/act_skbedit.c  |   3
-rw-r--r--  net/sched/sch_atm.c      |   6
-rw-r--r--  net/sched/sch_cbq.c      |   6
-rw-r--r--  net/sched/sch_drr.c      |   8
-rw-r--r--  net/sched/sch_dsmark.c   |   3
-rw-r--r--  net/sched/sch_fifo.c     |   2
-rw-r--r--  net/sched/sch_generic.c  |  41
-rw-r--r--  net/sched/sch_hfsc.c     |   6
-rw-r--r--  net/sched/sch_htb.c      |  17
-rw-r--r--  net/sched/sch_ingress.c  |   3
-rw-r--r--  net/sched/sch_multiq.c   |   3
-rw-r--r--  net/sched/sch_netem.c    |   6
-rw-r--r--  net/sched/sch_prio.c     |   3
-rw-r--r--  net/sched/sch_red.c      |   4
-rw-r--r--  net/sched/sch_sfq.c      | 294
-rw-r--r--  net/sched/sch_tbf.c      |   3
-rw-r--r--  net/sched/sch_teql.c     |   6

25 files changed, 251 insertions(+), 186 deletions(-)
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a994d7..f04d4a484d53 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@ menuconfig NET_SCHED
           To administer these schedulers, you'll need the user-level utilities
           from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
           That package also contains some documentation; for more, check out
-          <http://linux-net.osdl.org/index.php/Iproute2>.
+          <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
 
           This Quality of Service (QoS) support will enable you to use
           Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 
         spin_lock(&p->tcf_lock);
         p->tcf_tm.lastuse = jiffies;
-        p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        p->tcf_bstats.packets++;
+        bstats_update(&p->tcf_bstats, skb);
         action = p->tcf_action;
         update_flags = p->update_flags;
         spin_unlock(&p->tcf_lock);
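
The two-line stats updates removed here and throughout the files below collapse into helpers from include/net/sch_generic.h. A sketch of their presumed shape (the helpers themselves are outside this diff; note that the GSO segment counting, which sch_htb previously open-coded, now happens at every call site):

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}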
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
         spin_lock(&ipt->tcf_lock);
 
         ipt->tcf_tm.lastuse = jiffies;
-        ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        ipt->tcf_bstats.packets++;
+        bstats_update(&ipt->tcf_bstats, skb);
 
         /* yes, we have to worry about both in and out dev
          worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
         spin_lock(&m->tcf_lock);
         m->tcf_tm.lastuse = jiffies;
-        m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        m->tcf_bstats.packets++;
+        bstats_update(&m->tcf_bstats, skb);
 
         dev = m->tcfm_dev;
         if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
         egress = p->flags & TCA_NAT_FLAG_EGRESS;
         action = p->tcf_action;
 
-        p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        p->tcf_bstats.packets++;
+        bstats_update(&p->tcf_bstats, skb);
 
         spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
         p->tcf_qstats.overlimits++;
 done:
-        p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        p->tcf_bstats.packets++;
+        bstats_update(&p->tcf_bstats, skb);
         spin_unlock(&p->tcf_lock);
         return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
         spin_lock(&police->tcf_lock);
 
-        police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        police->tcf_bstats.packets++;
+        bstats_update(&police->tcf_bstats, skb);
 
         if (police->tcfp_ewma_rate &&
             police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
         spin_lock(&d->tcf_lock);
         d->tcf_tm.lastuse = jiffies;
-        d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        d->tcf_bstats.packets++;
+        bstats_update(&d->tcf_bstats, skb);
 
         /* print policy string followed by _ then packet count
          * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
         spin_lock(&d->tcf_lock);
         d->tcf_tm.lastuse = jiffies;
-        d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-        d->tcf_bstats.packets++;
+        bstats_update(&d->tcf_bstats, skb);
 
         if (d->flags & SKBEDIT_F_PRIORITY)
                 skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
                 }
                 return ret;
         }
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
-        flow->bstats.bytes += qdisc_pkt_len(skb);
-        flow->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
+        bstats_update(&flow->bstats, skb);
         /*
          * Okay, this may seem weird. We pretend we've dropped the packet if
          * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         ret = qdisc_enqueue(skb, cl->q);
         if (ret == NET_XMIT_SUCCESS) {
                 sch->q.qlen++;
-                sch->bstats.packets++;
-                sch->bstats.bytes += qdisc_pkt_len(skb);
+                qdisc_bstats_update(sch, skb);
                 cbq_mark_toplevel(q, cl);
                 if (!cl->next_alive)
                         cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
         ret = qdisc_enqueue(skb, cl->q);
         if (ret == NET_XMIT_SUCCESS) {
                 sch->q.qlen++;
-                sch->bstats.packets++;
-                sch->bstats.bytes += qdisc_pkt_len(skb);
+                qdisc_bstats_update(sch, skb);
                 if (!cl->next_alive)
                         cbq_activate_class(cl);
                 return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
         struct drr_sched *q = qdisc_priv(sch);
         struct drr_class *cl;
-        unsigned int len;
         int err;
 
         cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 return err;
         }
 
-        len = qdisc_pkt_len(skb);
         err = qdisc_enqueue(skb, cl->qdisc);
         if (unlikely(err != NET_XMIT_SUCCESS)) {
                 if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 cl->deficit = cl->quantum;
         }
 
-        cl->bstats.packets++;
-        cl->bstats.bytes += len;
-        sch->bstats.packets++;
-        sch->bstats.bytes += len;
+        bstats_update(&cl->bstats, skb);
+        qdisc_bstats_update(sch, skb);
 
         sch->q.qlen++;
         return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 return err;
         }
 
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
 
         return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4dfecb0cba37..aa4d6337e43c 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -54,8 +54,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         /* queue full, remove one skb to fulfill the limit */
         skb_head = qdisc_dequeue_head(sch);
-        sch->bstats.bytes -= qdisc_pkt_len(skb_head);
-        sch->bstats.packets--;
         sch->qstats.drops++;
         kfree_skb(skb_head);
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..34dc598440a2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
         /* check the reason of requeuing without tx lock first */
         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-        if (!netif_tx_queue_stopped(txq) &&
-            !netif_tx_queue_frozen(txq)) {
+        if (!netif_tx_queue_frozen_or_stopped(txq)) {
                 q->gso_skb = NULL;
                 q->q.qlen--;
         } else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
         spin_unlock(root_lock);
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+        if (!netif_tx_queue_frozen_or_stopped(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
 
         HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                 ret = dev_requeue_skb(skb, q);
         }
 
-        if (ret && (netif_tx_queue_stopped(txq) ||
-                    netif_tx_queue_frozen(txq)))
+        if (ret && netif_tx_queue_frozen_or_stopped(txq))
                 ret = 0;
 
         return ret;
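
The three separate stopped/frozen tests in this file (and the one in sch_teql.c below) fold into a single check. A sketch of what netif_tx_queue_frozen_or_stopped presumably reduces to in netdevice.h, assuming the usual queue-state bits (the exact bit names are an assumption, not shown in this diff):

static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
        /* one load tests both the XOFF and FROZEN state bits */
        return dev_queue->state & ((1 << __QUEUE_STATE_XOFF) |
                                   (1 << __QUEUE_STATE_FROZEN));
}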
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
         size = QDISC_ALIGN(sizeof(*sch));
         size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-        p = kzalloc(size, GFP_KERNEL);
+        p = kzalloc_node(size, GFP_KERNEL,
+                         netdev_queue_numa_node_read(dev_queue));
+
         if (!p)
                 goto errout;
         sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
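
Allocating the Qdisc on the NUMA node that owns the tx queue keeps the hot enqueue/dequeue state local to the CPU servicing that queue. A sketch of the accessor, under the assumption that the node is only tracked when XPS and NUMA are both configured:

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
        return q->numa_node;
#else
        return NUMA_NO_NODE;        /* kzalloc_node() then behaves like kzalloc() */
#endif
}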
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
         return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-        netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-        if (dev_ingress_queue(dev))
-                dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+        struct net_device *dev;
+
+        list_for_each_entry(dev, head, unreg_list) {
+                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+                                         &noop_qdisc);
+                if (dev_ingress_queue(dev))
+                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
+                                             &noop_qdisc);
 
-        dev_watchdog_down(dev);
+                dev_watchdog_down(dev);
+        }
 
         /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
         synchronize_rcu();
 
         /* Wait for outstanding qdisc_run calls. */
-        while (some_qdisc_is_busy(dev))
-                yield();
+        list_for_each_entry(dev, head, unreg_list)
+                while (some_qdisc_is_busy(dev))
+                        yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+        LIST_HEAD(single);
+
+        list_add(&dev->unreg_list, &single);
+        dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
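
dev_deactivate_many() exists so the costly synchronize_rcu() is paid once per batch rather than once per device. A hypothetical caller tearing down several devices at once (dev1/dev2 are illustrative names, not from this diff):

        LIST_HEAD(many);

        list_add(&dev1->unreg_list, &many);
        list_add(&dev2->unreg_list, &many);
        dev_deactivate_many(&many);     /* one synchronize_rcu() for the whole batch */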
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         if (cl->qdisc->q.qlen == 1)
                 set_active(cl, qdisc_pkt_len(skb));
 
-        cl->bstats.packets++;
-        cl->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        bstats_update(&cl->bstats, skb);
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
 
         return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 }
                 return ret;
         } else {
-                cl->bstats.packets +=
-                        skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-                cl->bstats.bytes += qdisc_pkt_len(skb);
+                bstats_update(&cl->bstats, skb);
                 htb_activate(q, cl);
         }
 
         sch->q.qlen++;
-        sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                         htb_add_to_wait_tree(q, cl, diff);
                 }
 
-                /* update byte stats except for leaves which are already updated */
-                if (cl->level) {
-                        cl->bstats.bytes += bytes;
-                        cl->bstats.packets += skb_is_gso(skb)?
-                                        skb_shinfo(skb)->gso_segs:1;
-                }
+                /* update basic stats except for leaves which are already updated */
+                if (cl->level)
+                        bstats_update(&cl->bstats, skb);
+
                 cl = cl->parent;
         }
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         result = tc_classify(skb, p->filter_list, &res);
 
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         switch (result) {
         case TC_ACT_SHOT:
                 result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         ret = qdisc_enqueue(skb, qdisc);
         if (ret == NET_XMIT_SUCCESS) {
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
                 return NET_XMIT_SUCCESS;
         }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         if (likely(ret == NET_XMIT_SUCCESS)) {
                 sch->q.qlen++;
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
         } else if (net_xmit_drop_count(ret)) {
                 sch->qstats.drops++;
         }
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
                 __skb_queue_after(list, skb, nskb);
 
                 sch->qstats.backlog += qdisc_pkt_len(nskb);
-                sch->bstats.bytes += qdisc_pkt_len(nskb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, nskb);
 
                 return NET_XMIT_SUCCESS;
         }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         ret = qdisc_enqueue(skb, qdisc);
         if (ret == NET_XMIT_SUCCESS) {
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
                 return NET_XMIT_SUCCESS;
         }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8d42bb3ba540..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         ret = qdisc_enqueue(skb, child);
         if (likely(ret == NET_XMIT_SUCCESS)) {
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
         } else if (net_xmit_drop_count(ret)) {
                 q->stats.pdrop++;
@@ -239,6 +238,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
                 .Scell_log      = q->parms.Scell_log,
         };
 
+        sch->qstats.backlog = q->qdisc->qstats.backlog;
         opts = nla_nest_start(skb, TCA_OPTIONS);
         if (opts == NULL)
                 goto nla_put_failure;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3cf478d012dd..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -67,27 +67,47 @@
 
         IMPLEMENTATION:
         This implementation limits maximal queue length to 128;
-        maximal mtu to 2^15-1; number of hash buckets to 1024.
+        max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
         The only goal of this restrictions was that all data
-        fit into one 4K page :-). Struct sfq_sched_data is
-        organized in anti-cache manner: all the data for a bucket
-        are scattered over different locations. This is not good,
-        but it allowed me to put it into 4K.
+        fit into one 4K page on 32bit arches.
 
         It is easy to increase these values, but not in flight.  */
 
-#define SFQ_DEPTH               128
+#define SFQ_DEPTH               128 /* max number of packets per flow */
+#define SFQ_SLOTS               128 /* max number of flows */
+#define SFQ_EMPTY_SLOT          255
 #define SFQ_HASH_DIVISOR        1024
+/* We use 16 bits to store allot, and want to handle packets up to 64K
+ * Scale allot by 8 (1<<3) so that no overflow occurs.
+ */
+#define SFQ_ALLOT_SHIFT         3
+#define SFQ_ALLOT_SIZE(X)       DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
 
-/* This type should contain at least SFQ_DEPTH*2 values */
+/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
 typedef unsigned char sfq_index;
 
+/*
+ * We dont use pointers to save space.
+ * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
+ * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
+ * are 'pointers' to dep[] array
+ */
 struct sfq_head
 {
         sfq_index       next;
         sfq_index       prev;
 };
 
+struct sfq_slot {
+        struct sk_buff  *skblist_next;
+        struct sk_buff  *skblist_prev;
+        sfq_index       qlen; /* number of skbs in skblist */
+        sfq_index       next; /* next slot in sfq chain */
+        struct sfq_head dep; /* anchor in dep[] chains */
+        unsigned short  hash; /* hash value (index in ht[]) */
+        short           allot; /* credit for this slot */
+};
+
 struct sfq_sched_data
 {
 /* Parameters */
@@ -99,17 +119,24 @@ struct sfq_sched_data
         struct tcf_proto *filter_list;
         struct timer_list perturb_timer;
         u32             perturbation;
-        sfq_index       tail;           /* Index of current slot in round */
-        sfq_index       max_depth;      /* Maximal depth */
-
+        sfq_index       cur_depth;      /* depth of longest slot */
+        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+        struct sfq_slot *tail;          /* current slot in round */
         sfq_index       ht[SFQ_HASH_DIVISOR];   /* Hash table */
-        sfq_index       next[SFQ_DEPTH];        /* Active slots link */
-        short           allot[SFQ_DEPTH];       /* Current allotment per slot */
-        unsigned short  hash[SFQ_DEPTH];        /* Hash value indexed by slots */
-        struct sk_buff_head     qs[SFQ_DEPTH];          /* Slot queue */
-        struct sfq_head dep[SFQ_DEPTH*2];       /* Linked list of slots, indexed by depth */
+        struct sfq_slot slots[SFQ_SLOTS];
+        struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
 };
 
+/*
+ * sfq_head are either in a sfq_slot or in dep[] array
+ */
+static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
+{
+        if (val < SFQ_SLOTS)
+                return &q->slots[val].dep;
+        return &q->dep[val - SFQ_SLOTS];
+}
+
 static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
         return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
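
A worked example of the allot scaling introduced above (numbers assumed, arithmetic follows the macros in the hunk):

        quantum          = 1514                                 /* one Ethernet MTU */
        scaled_quantum   = SFQ_ALLOT_SIZE(1514)
                         = DIV_ROUND_UP(1514, 8) = 190
        64K GSO packet   = DIV_ROUND_UP(65535, 8) = 8192 units

Even a maximal 64K packet charges only 8192 scaled units, far inside the signed 16-bit allot field; that headroom is what lets the comment above raise the admissible mtu from 2^15-1 to 2^18-1.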
@@ -200,30 +227,41 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
         return 0;
 }
 
+/*
+ * x : slot number [0 .. SFQ_SLOTS - 1]
+ */
 static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
 {
         sfq_index p, n;
-        int d = q->qs[x].qlen + SFQ_DEPTH;
+        int qlen = q->slots[x].qlen;
+
+        p = qlen + SFQ_SLOTS;
+        n = q->dep[qlen].next;
 
-        p = d;
-        n = q->dep[d].next;
-        q->dep[x].next = n;
-        q->dep[x].prev = p;
-        q->dep[p].next = q->dep[n].prev = x;
+        q->slots[x].dep.next = n;
+        q->slots[x].dep.prev = p;
+
+        q->dep[qlen].next = x;          /* sfq_dep_head(q, p)->next = x */
+        sfq_dep_head(q, n)->prev = x;
 }
 
+#define sfq_unlink(q, x, n, p)                  \
+        n = q->slots[x].dep.next;               \
+        p = q->slots[x].dep.prev;               \
+        sfq_dep_head(q, p)->next = n;           \
+        sfq_dep_head(q, n)->prev = p
+
+
 static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
 {
         sfq_index p, n;
+        int d;
 
-        n = q->dep[x].next;
-        p = q->dep[x].prev;
-        q->dep[p].next = n;
-        q->dep[n].prev = p;
-
-        if (n == p && q->max_depth == q->qs[x].qlen + 1)
-                q->max_depth--;
+        sfq_unlink(q, x, n, p);
 
+        d = q->slots[x].qlen--;
+        if (n == p && q->cur_depth == d)
+                q->cur_depth--;
         sfq_link(q, x);
 }
 
@@ -232,34 +270,74 @@ static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
         sfq_index p, n;
         int d;
 
-        n = q->dep[x].next;
-        p = q->dep[x].prev;
-        q->dep[p].next = n;
-        q->dep[n].prev = p;
-        d = q->qs[x].qlen;
-        if (q->max_depth < d)
-                q->max_depth = d;
+        sfq_unlink(q, x, n, p);
 
+        d = ++q->slots[x].qlen;
+        if (q->cur_depth < d)
+                q->cur_depth = d;
         sfq_link(q, x);
 }
 
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from tail of slot queue */
+static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
+{
+        struct sk_buff *skb = slot->skblist_prev;
+
+        slot->skblist_prev = skb->prev;
+        skb->prev->next = (struct sk_buff *)slot;
+        skb->next = skb->prev = NULL;
+        return skb;
+}
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
+{
+        struct sk_buff *skb = slot->skblist_next;
+
+        slot->skblist_next = skb->next;
+        skb->next->prev = (struct sk_buff *)slot;
+        skb->next = skb->prev = NULL;
+        return skb;
+}
+
+static inline void slot_queue_init(struct sfq_slot *slot)
+{
+        slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
+}
+
+/* add skb to slot queue (tail add) */
+static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
+{
+        skb->prev = slot->skblist_prev;
+        skb->next = (struct sk_buff *)slot;
+        slot->skblist_prev->next = skb;
+        slot->skblist_prev = skb;
+}
+
+#define slot_queue_walk(slot, skb)              \
+        for (skb = slot->skblist_next;          \
+             skb != (struct sk_buff *)slot;     \
+             skb = skb->next)
+
 static unsigned int sfq_drop(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index d = q->max_depth;
+        sfq_index x, d = q->cur_depth;
         struct sk_buff *skb;
         unsigned int len;
+        struct sfq_slot *slot;
 
-        /* Queue is full! Find the longest slot and
-           drop a packet from it */
-
+        /* Queue is full! Find the longest slot and drop tail packet from it */
         if (d > 1) {
-                sfq_index x = q->dep[d + SFQ_DEPTH].next;
-                skb = q->qs[x].prev;
+                x = q->dep[d].next;
+                slot = &q->slots[x];
+drop:
+                skb = slot_dequeue_tail(slot);
                 len = qdisc_pkt_len(skb);
-                __skb_unlink(skb, &q->qs[x]);
-                kfree_skb(skb);
                 sfq_dec(q, x);
+                kfree_skb(skb);
                 sch->q.qlen--;
                 sch->qstats.drops++;
                 sch->qstats.backlog -= len;
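
The slot list helpers above are intrusive: the slot itself acts as the list sentinel. Casting the slot to (struct sk_buff *) works because skblist_next/skblist_prev occupy the same offsets as the next/prev pointers at the top of struct sk_buff, the same layout trick struct sk_buff_head relies on. An emptiness test would therefore look like (helper name assumed, not part of the diff):

static inline bool slot_queue_empty(const struct sfq_slot *slot)
{
        /* empty when the circular list points back at the sentinel */
        return slot->skblist_next == (struct sk_buff *)slot;
}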
@@ -268,19 +346,11 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 
         if (d == 1) {
                 /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
-                d = q->next[q->tail];
-                q->next[q->tail] = q->next[d];
-                q->allot[q->next[d]] += q->quantum;
-                skb = q->qs[d].prev;
-                len = qdisc_pkt_len(skb);
-                __skb_unlink(skb, &q->qs[d]);
-                kfree_skb(skb);
-                sfq_dec(q, d);
-                sch->q.qlen--;
-                q->ht[q->hash[d]] = SFQ_DEPTH;
-                sch->qstats.drops++;
-                sch->qstats.backlog -= len;
-                return len;
+                x = q->tail->next;
+                slot = &q->slots[x];
+                q->tail->next = slot->next;
+                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+                goto drop;
         }
 
         return 0;
@@ -292,6 +362,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         struct sfq_sched_data *q = qdisc_priv(sch);
         unsigned int hash;
         sfq_index x;
+        struct sfq_slot *slot;
         int uninitialized_var(ret);
 
         hash = sfq_classify(skb, sch, &ret);
@@ -304,35 +375,35 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         hash--;
 
         x = q->ht[hash];
-        if (x == SFQ_DEPTH) {
-                q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-                q->hash[x] = hash;
+        slot = &q->slots[x];
+        if (x == SFQ_EMPTY_SLOT) {
+                x = q->dep[0].next; /* get a free slot */
+                q->ht[hash] = x;
+                slot = &q->slots[x];
+                slot->hash = hash;
         }
 
-        /* If selected queue has length q->limit, this means that
-         * all another queues are empty and that we do simple tail drop,
+        /* If selected queue has length q->limit, do simple tail drop,
          * i.e. drop _this_ packet.
          */
-        if (q->qs[x].qlen >= q->limit)
+        if (slot->qlen >= q->limit)
                 return qdisc_drop(skb, sch);
 
         sch->qstats.backlog += qdisc_pkt_len(skb);
-        __skb_queue_tail(&q->qs[x], skb);
+        slot_queue_add(slot, skb);
         sfq_inc(q, x);
-        if (q->qs[x].qlen == 1) {               /* The flow is new */
-                if (q->tail == SFQ_DEPTH) {     /* It is the first flow */
-                        q->tail = x;
-                        q->next[x] = x;
-                        q->allot[x] = q->quantum;
+        if (slot->qlen == 1) {          /* The flow is new */
+                if (q->tail == NULL) {  /* It is the first flow */
+                        slot->next = x;
                 } else {
-                        q->next[x] = q->next[q->tail];
-                        q->next[q->tail] = x;
-                        q->tail = x;
+                        slot->next = q->tail->next;
+                        q->tail->next = x;
                 }
+                q->tail = slot;
+                slot->allot = q->scaled_quantum;
         }
         if (++sch->q.qlen <= q->limit) {
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
                 return NET_XMIT_SUCCESS;
         }
 
@@ -344,14 +415,12 @@ static struct sk_buff *
 sfq_peek(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index a;
 
         /* No active slots */
-        if (q->tail == SFQ_DEPTH)
+        if (q->tail == NULL)
                 return NULL;
 
-        a = q->next[q->tail];
-        return skb_peek(&q->qs[a]);
+        return q->slots[q->tail->next].skblist_next;
 }
 
 static struct sk_buff *
357static struct sk_buff * 426static struct sk_buff *
@@ -359,34 +428,37 @@ sfq_dequeue(struct Qdisc *sch)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
-        sfq_index a, old_a;
+        sfq_index a, next_a;
+        struct sfq_slot *slot;
 
         /* No active slots */
-        if (q->tail == SFQ_DEPTH)
+        if (q->tail == NULL)
                 return NULL;
 
-        a = old_a = q->next[q->tail];
-
-        /* Grab packet */
-        skb = __skb_dequeue(&q->qs[a]);
+next_slot:
+        a = q->tail->next;
+        slot = &q->slots[a];
+        if (slot->allot <= 0) {
+                q->tail = slot;
+                slot->allot += q->scaled_quantum;
+                goto next_slot;
+        }
+        skb = slot_dequeue_head(slot);
         sfq_dec(q, a);
         sch->q.qlen--;
         sch->qstats.backlog -= qdisc_pkt_len(skb);
 
         /* Is the slot empty? */
-        if (q->qs[a].qlen == 0) {
-                q->ht[q->hash[a]] = SFQ_DEPTH;
-                a = q->next[a];
-                if (a == old_a) {
-                        q->tail = SFQ_DEPTH;
+        if (slot->qlen == 0) {
+                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+                next_a = slot->next;
+                if (a == next_a) {
+                        q->tail = NULL; /* no more active slots */
                         return skb;
                 }
-                q->next[q->tail] = a;
-                q->allot[a] += q->quantum;
-        } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
-                q->tail = a;
-                a = q->next[a];
-                q->allot[a] += q->quantum;
+                q->tail->next = next_a;
+        } else {
+                slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
         }
         return skb;
 }
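
Note the inverted credit handling in the new dequeue path: the old code charged allot after dequeuing and rotated when it dropped to zero or below, while the new code refuses a slot whose credit is already exhausted, refills it with scaled_quantum, and advances to the next slot in the round. An illustrative round (values assumed) for a slot holding 1514-byte packets with quantum 1514:

        allot = 190                                     /* scaled_quantum */
        dequeue: allot -= SFQ_ALLOT_SIZE(1514) = 190    ->  allot = 0
        next visit: allot <= 0, refill to 190, advance to the next slot

so each active flow still sends roughly one quantum of bytes per round, but the bookkeeping now fits in the 16-bit allot field.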
@@ -422,6 +494,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 
         sch_tree_lock(sch);
         q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
+        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
         q->perturb_period = ctl->perturb_period * HZ;
         if (ctl->limit)
                 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -450,19 +523,19 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
         init_timer_deferrable(&q->perturb_timer);
 
         for (i = 0; i < SFQ_HASH_DIVISOR; i++)
-                q->ht[i] = SFQ_DEPTH;
+                q->ht[i] = SFQ_EMPTY_SLOT;
 
         for (i = 0; i < SFQ_DEPTH; i++) {
-                skb_queue_head_init(&q->qs[i]);
-                q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
-                q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
+                q->dep[i].next = i + SFQ_SLOTS;
+                q->dep[i].prev = i + SFQ_SLOTS;
         }
 
         q->limit = SFQ_DEPTH - 1;
-        q->max_depth = 0;
-        q->tail = SFQ_DEPTH;
+        q->cur_depth = 0;
+        q->tail = NULL;
         if (opt == NULL) {
                 q->quantum = psched_mtu(qdisc_dev(sch));
+                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
                 q->perturb_period = 0;
                 q->perturbation = net_random();
         } else {
@@ -471,8 +544,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
                 return err;
         }
 
-        for (i = 0; i < SFQ_DEPTH; i++)
+        for (i = 0; i < SFQ_SLOTS; i++) {
+                slot_queue_init(&q->slots[i]);
                 sfq_link(q, i);
+        }
         return 0;
 }
 
@@ -547,10 +622,19 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                              struct gnet_dump *d)
 {
         struct sfq_sched_data *q = qdisc_priv(sch);
-        sfq_index idx = q->ht[cl-1];
-        struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
-        struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };
+        sfq_index idx = q->ht[cl - 1];
+        struct gnet_stats_queue qs = { 0 };
+        struct tc_sfq_xstats xstats = { 0 };
+        struct sk_buff *skb;
+
+        if (idx != SFQ_EMPTY_SLOT) {
+                const struct sfq_slot *slot = &q->slots[idx];
 
+                xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+                qs.qlen = slot->qlen;
+                slot_queue_walk(slot, skb)
+                        qs.backlog += qdisc_pkt_len(skb);
+        }
         if (gnet_stats_copy_queue(d, &qs) < 0)
                 return -1;
         return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -565,7 +649,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                 return;
 
         for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
-                if (q->ht[i] == SFQ_DEPTH ||
+                if (q->ht[i] == SFQ_EMPTY_SLOT ||
                     arg->count < arg->skip) {
                         arg->count++;
                         continue;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         }
 
         sch->q.qlen++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
 }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af9596709..af9360d1f6eb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         if (q->q.qlen < dev->tx_queue_len) {
                 __skb_queue_tail(&q->q, skb);
-                sch->bstats.bytes += qdisc_pkt_len(skb);
-                sch->bstats.packets++;
+                qdisc_bstats_update(sch, skb);
                 return NET_XMIT_SUCCESS;
         }
 
@@ -309,8 +308,7 @@ restart:
                         if (__netif_tx_trylock(slave_txq)) {
                                 unsigned int length = qdisc_pkt_len(skb);
 
-                                if (!netif_tx_queue_stopped(slave_txq) &&
-                                    !netif_tx_queue_frozen(slave_txq) &&
+                                if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
                                     slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                         txq_trans_update(slave_txq);
                                         __netif_tx_unlock(slave_txq);