author     Eric Dumazet <eric.dumazet@gmail.com>    2011-01-21 02:31:33 -0500
committer  David S. Miller <davem@davemloft.net>    2011-01-21 02:31:33 -0500
commit     9190b3b3208d052d98cb601fcc192f3f71a5658b (patch)
tree       b642a00320a1b35e33741fcd162072724f228fbf /net
parent     b30532515f0a62bfe17207ab00883dd262497006 (diff)
net_sched: accurate bytes/packets stats/rates
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets.

Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, and bstats cannot be changed afterwards, so
bstats/rates are not accurate (they are overestimated).

This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more correct, since enqueue() bursts don't have an
effect on the dequeue() rate.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
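The pattern repeated in every file below is the same: delete qdisc_bstats_update() from the qdisc's enqueue() path and call it once in the dequeue() path. What follows is a minimal user-space C sketch of that accounting policy — illustration only, not kernel code, and all toy_* names are hypothetical. It mimics a pfifo_head_drop-style queue: a full queue drops its oldest packet, and because bytes/packets are counted only at dequeue (the counters `tc -s qdisc` reports as "Sent N bytes M pkt"), dropped packets never inflate the stats.

/*
 * Illustration only -- a user-space sketch, not kernel code.
 * Byte/packet counters are updated at dequeue() time, so a packet
 * dropped while it is still queued never shows up in the stats.
 */
#include <stdio.h>
#include <string.h>

#define LIMIT 4

struct toy_stats {                      /* stands in for sch->bstats */
        unsigned long bytes;
        unsigned long packets;
};

struct toy_fifo {
        int len[LIMIT];                 /* packet sizes, FIFO order */
        int qlen;
        struct toy_stats bstats;
        unsigned long drops;            /* stands in for sch->qstats.drops */
};

/* Head-drop enqueue, like pfifo_head_drop: no stats update here. */
static void toy_enqueue(struct toy_fifo *q, int size)
{
        if (q->qlen == LIMIT) {
                /* queue full: drop the oldest packet, bstats untouched */
                memmove(q->len, q->len + 1, (LIMIT - 1) * sizeof(int));
                q->qlen--;
                q->drops++;
        }
        q->len[q->qlen++] = size;
}

/* Dequeue is now the only place the counters move, analogous to
 * calling qdisc_bstats_update() in the dequeue() path. */
static int toy_dequeue(struct toy_fifo *q)
{
        int size;

        if (q->qlen == 0)
                return -1;
        size = q->len[0];
        memmove(q->len, q->len + 1, (q->qlen - 1) * sizeof(int));
        q->qlen--;
        q->bstats.bytes += size;
        q->bstats.packets++;
        return size;
}

int main(void)
{
        struct toy_fifo q = { { 0 } };
        int i;

        for (i = 0; i < 6; i++)         /* burst of 6 into a queue of 4 */
                toy_enqueue(&q, 100);
        while (toy_dequeue(&q) >= 0)
                ;
        /* prints: sent 4 pkts / 400 bytes, dropped 2 */
        printf("sent %lu pkts / %lu bytes, dropped %lu\n",
               q.bstats.packets, q.bstats.bytes, q.drops);
        return 0;
}

Had the counters been bumped in toy_enqueue() instead, the example would report 6 packets / 600 bytes sent even though two were dropped — exactly the overestimation the commit message describes.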
Diffstat (limited to 'net')
-rw-r--r--  net/sched/sch_cbq.c     3
-rw-r--r--  net/sched/sch_drr.c     2
-rw-r--r--  net/sched/sch_dsmark.c  2
-rw-r--r--  net/sched/sch_fifo.c    5
-rw-r--r--  net/sched/sch_hfsc.c    2
-rw-r--r--  net/sched/sch_htb.c    12
-rw-r--r--  net/sched/sch_multiq.c  2
-rw-r--r--  net/sched/sch_netem.c   3
-rw-r--r--  net/sched/sch_prio.c    2
-rw-r--r--  net/sched/sch_red.c    11
-rw-r--r--  net/sched/sch_sfq.c     5
-rw-r--r--  net/sched/sch_tbf.c     2
-rw-r--r--  net/sched/sch_teql.c    3
13 files changed, 24 insertions(+), 30 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c210c5d..5f63ec58942c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         ret = qdisc_enqueue(skb, cl->q);
         if (ret == NET_XMIT_SUCCESS) {
                 sch->q.qlen++;
-                qdisc_bstats_update(sch, skb);
                 cbq_mark_toplevel(q, cl);
                 if (!cl->next_alive)
                         cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
                 ret = qdisc_enqueue(skb, cl->q);
                 if (ret == NET_XMIT_SUCCESS) {
                         sch->q.qlen++;
-                        qdisc_bstats_update(sch, skb);
                         if (!cl->next_alive)
                                 cbq_activate_class(cl);
                         return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
 
         skb = cbq_dequeue_1(sch);
         if (skb) {
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen--;
                 sch->flags &= ~TCQ_F_THROTTLED;
                 return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e642eafc..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
 
         bstats_update(&cl->bstats, skb);
-        qdisc_bstats_update(sch, skb);
 
         sch->q.qlen++;
         return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
                         skb = qdisc_dequeue_peeked(cl->qdisc);
                         if (cl->qdisc->q.qlen == 0)
                                 list_del(&cl->alist);
+                        qdisc_bstats_update(sch, skb);
                         sch->q.qlen--;
                         return skb;
                 }
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd4408e..0f7bf3fdfea5 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 return err;
         }
 
-        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
 
         return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
         if (skb == NULL)
                 return NULL;
 
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
 
         index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d6337e43c..d468b479aa93 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-        struct sk_buff *skb_head;
         struct fifo_sched_data *q = qdisc_priv(sch);
 
         if (likely(skb_queue_len(&sch->q) < q->limit))
                 return qdisc_enqueue_tail(skb, sch);
 
         /* queue full, remove one skb to fulfill the limit */
-        skb_head = qdisc_dequeue_head(sch);
+        __qdisc_queue_drop_head(sch, &sch->q);
         sch->qstats.drops++;
-        kfree_skb(skb_head);
-
         qdisc_enqueue_tail(skb, sch);
 
         return NET_XMIT_CN;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791d4f6c..14a799de1c35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         set_active(cl, qdisc_pkt_len(skb));
 
         bstats_update(&cl->bstats, skb);
-        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
 
         return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
         }
 
         sch->flags &= ~TCQ_F_THROTTLED;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
 
         return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0c6836..fc12fe6f5597 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
 
         sch->q.qlen++;
-        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
 }
 
@@ -842,7 +841,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-        struct sk_buff *skb = NULL;
+        struct sk_buff *skb;
         struct htb_sched *q = qdisc_priv(sch);
         int level;
         psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
         skb = __skb_dequeue(&q->direct_queue);
         if (skb != NULL) {
+ok:
+                qdisc_bstats_update(sch, skb);
                 sch->flags &= ~TCQ_F_THROTTLED;
                 sch->q.qlen--;
                 return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                         int prio = ffz(m);
                         m |= 1 << prio;
                         skb = htb_dequeue_tree(q, prio, level);
-                        if (likely(skb != NULL)) {
-                                sch->q.qlen--;
-                                sch->flags &= ~TCQ_F_THROTTLED;
-                                goto fin;
-                        }
+                        if (likely(skb != NULL))
+                                goto ok;
                 }
         }
         sch->qstats.overlimits++;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da24763..436a2e75b322 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         ret = qdisc_enqueue(skb, qdisc);
         if (ret == NET_XMIT_SUCCESS) {
-                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
                 return NET_XMIT_SUCCESS;
         }
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
                 qdisc = q->queues[q->curband];
                 skb = qdisc->dequeue(qdisc);
                 if (skb) {
+                        qdisc_bstats_update(sch, skb);
                         sch->q.qlen--;
                         return skb;
                 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce863479..6a3006b38dc5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         if (likely(ret == NET_XMIT_SUCCESS)) {
                 sch->q.qlen++;
-                qdisc_bstats_update(sch, skb);
         } else if (net_xmit_drop_count(ret)) {
                 sch->qstats.drops++;
         }
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                 skb->tstamp.tv64 = 0;
 #endif
                 pr_debug("netem_dequeue: return skb=%p\n", skb);
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen--;
                 return skb;
         }
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
         __skb_queue_after(list, skb, nskb);
 
         sch->qstats.backlog += qdisc_pkt_len(nskb);
-        qdisc_bstats_update(sch, nskb);
 
         return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d49dd1..fbd710d619bf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
         ret = qdisc_enqueue(skb, qdisc);
         if (ret == NET_XMIT_SUCCESS) {
-                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
                 return NET_XMIT_SUCCESS;
         }
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
                 struct Qdisc *qdisc = q->queues[prio];
                 struct sk_buff *skb = qdisc->dequeue(qdisc);
                 if (skb) {
+                        qdisc_bstats_update(sch, skb);
                         sch->q.qlen--;
                         return skb;
                 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5a2c97..9f98dbd32d4c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         ret = qdisc_enqueue(skb, child);
         if (likely(ret == NET_XMIT_SUCCESS)) {
-                qdisc_bstats_update(sch, skb);
                 sch->q.qlen++;
         } else if (net_xmit_drop_count(ret)) {
                 q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
         struct Qdisc *child = q->qdisc;
 
         skb = child->dequeue(child);
-        if (skb)
+        if (skb) {
+                qdisc_bstats_update(sch, skb);
                 sch->q.qlen--;
-        else if (!red_is_idling(&q->parms))
-                red_start_of_idle_period(&q->parms);
-
+        } else {
+                if (!red_is_idling(&q->parms))
+                        red_start_of_idle_period(&q->parms);
+        }
         return skb;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53a634d..edea8cefec6c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 q->tail = slot;
                 slot->allot = q->scaled_quantum;
         }
-        if (++sch->q.qlen <= q->limit) {
-                qdisc_bstats_update(sch, skb);
+        if (++sch->q.qlen <= q->limit)
                 return NET_XMIT_SUCCESS;
-        }
 
         sfq_drop(sch);
         return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
         }
         skb = slot_dequeue_head(slot);
         sfq_dec(q, a);
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
         sch->qstats.backlog -= qdisc_pkt_len(skb);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e721811..e93165820c3f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         }
 
         sch->q.qlen++;
-        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
 }
 
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                         q->ptokens = ptoks;
                         sch->q.qlen--;
                         sch->flags &= ~TCQ_F_THROTTLED;
+                        qdisc_bstats_update(sch, skb);
                         return skb;
                 }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 84ce48eadff4..d84e7329660f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -87,7 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
         if (q->q.qlen < dev->tx_queue_len) {
                 __skb_queue_tail(&q->q, skb);
-                qdisc_bstats_update(sch, skb);
                 return NET_XMIT_SUCCESS;
         }
 
@@ -111,6 +110,8 @@ teql_dequeue(struct Qdisc* sch)
                         dat->m->slaves = sch;
                         netif_wake_queue(m);
                 }
+        } else {
+                qdisc_bstats_update(sch, skb);
         }
         sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
         return skb;