Diffstat (limited to 'net/sched')

 net/sched/act_police.c   | 33
 net/sched/cls_flower.c   |  6
 net/sched/cls_u32.c      | 72
 net/sched/sch_drr.c      |  4
 net/sched/sch_fq_codel.c | 26
 net/sched/sch_generic.c  |  2
 net/sched/sch_hfsc.c     | 12
 net/sched/sch_ingress.c  | 12
 net/sched/sch_prio.c     |  4
 net/sched/sch_qfq.c      |  6
 net/sched/sch_red.c      |  4
 net/sched/sch_tbf.c      |  4

 12 files changed, 111 insertions(+), 74 deletions(-)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b884dae692a1..c557789765dc 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -38,7 +38,7 @@ struct tcf_police {
 	bool peak_present;
 };
 #define to_police(pc) \
-	container_of(pc, struct tcf_police, common)
+	container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK 15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 				 struct nlattr *est, struct tc_action *a,
 				 int ovr, int bind)
 {
-	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
-	struct tcf_hashinfo *hinfo = tn->hinfo;
 	int size;
 
 	if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
 	if (parm->index) {
 		if (tcf_hash_search(tn, a, parm->index)) {
-			police = to_police(a->priv);
+			police = to_police(a);
 			if (bind) {
 				police->tcf_bindcnt += 1;
 				police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 			/* not replacing */
 			return -EEXIST;
 		}
+	} else {
+		ret = tcf_hash_create(tn, parm->index, NULL, a,
+				      sizeof(*police), bind, false);
+		if (ret)
+			return ret;
+		ret = ACT_P_CREATED;
 	}
 
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (police == NULL)
-		return -ENOMEM;
-	ret = ACT_P_CREATED;
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (bind)
-		police->tcf_bindcnt = 1;
+	police = to_police(a);
 override:
 	if (parm->rate.rate) {
 		err = -ENOMEM;
@@ -237,16 +234,8 @@ override:
 		return ret;
 
 	police->tcfp_t_c = ktime_get_ns();
-	police->tcf_index = parm->index ? parm->index :
-			    tcf_hash_new_index(tn);
-	police->tcf_tm.install = jiffies;
-	police->tcf_tm.lastuse = jiffies;
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	spin_lock_bh(&hinfo->lock);
-	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-	spin_unlock_bh(&hinfo->lock);
+	tcf_hash_insert(tn, a);
 
-	a->priv = police;
 	return ret;
 
 failure_unlock:
@@ -255,7 +244,7 @@ failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
 	if (ret == ACT_P_CREATED)
-		kfree(police);
+		tcf_hash_cleanup(a, est);
 	return err;
 }
 
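Note on the act_police hunks above: the open-coded allocation and hash insertion are replaced with the shared tc_action hash helpers that already appear in the diff (tcf_hash_search(), tcf_hash_create(), tcf_hash_insert(), tcf_hash_cleanup()). A condensed sketch of the resulting lookup-or-create flow, with the rate-table setup elided; this is a reading aid, not a verbatim copy of tcf_act_police_locate():

	if (parm->index && tcf_hash_search(tn, a, parm->index)) {
		police = to_police(a);	/* existing action was found and bound to 'a' */
		if (bind)
			police->tcf_bindcnt += 1;
	} else {
		/* helper allocates the private area and takes the initial refcount */
		ret = tcf_hash_create(tn, parm->index, NULL, a,
				      sizeof(*police), bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	}
	police = to_police(a);
	/* ... parse and install rate tables ... */
	tcf_hash_insert(tn, a);	/* publish; replaces the hand-rolled hlist_add_head() */

On the failure path, tcf_hash_cleanup(a, est) undoes tcf_hash_create(), which is why the explicit kfree(police) disappears.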
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 730aacafc22d..b3b7978f4182 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return;
 
 	offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 079b43b3c5d2..ffe593efe930 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
 		offload.cls_u32->knode.handle = handle;
 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
 	struct tc_to_netdev offload;
 	int err;
 
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-		offload.cls_u32->hnode.divisor = h->divisor;
-		offload.cls_u32->hnode.handle = h->handle;
-		offload.cls_u32->hnode.prio = h->prio;
+	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+	offload.cls_u32->hnode.divisor = h->divisor;
+	offload.cls_u32->hnode.handle = h->handle;
+	offload.cls_u32->hnode.prio = h->prio;
 
 	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
 					    tp->protocol, &offload);
 	if (tc_skip_sw(flags))
 		return err;
-	}
 
 	return 0;
 }
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
 		offload.cls_u32->hnode.divisor = h->divisor;
 		offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-		offload.cls_u32->knode.handle = n->handle;
-		offload.cls_u32->knode.fshift = n->fshift;
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
+	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+	offload.cls_u32->knode.handle = n->handle;
+	offload.cls_u32->knode.fshift = n->fshift;
 #ifdef CONFIG_CLS_U32_MARK
 	offload.cls_u32->knode.val = n->val;
 	offload.cls_u32->knode.mask = n->mask;
 #else
 	offload.cls_u32->knode.val = 0;
 	offload.cls_u32->knode.mask = 0;
 #endif
 	offload.cls_u32->knode.sel = &n->sel;
 	offload.cls_u32->knode.exts = &n->exts;
 	if (n->ht_down)
 		offload.cls_u32->knode.link_handle = n->ht_down->handle;
 
 	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
 					    tp->protocol, &offload);
 	if (tc_skip_sw(flags))
 		return err;
-	}
 
 	return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 		if (!tc_flags_valid(flags))
-			return err;
+			return -EINVAL;
 	}
 
 	n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		ht->divisor = divisor;
 		ht->handle = handle;
 		ht->prio = tp->prio;
+
+		err = u32_replace_hw_hnode(tp, ht, flags);
+		if (err) {
+			kfree(ht);
+			return err;
+		}
+
 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 		rcu_assign_pointer(tp_c->hlist, ht);
 		*arg = (unsigned long)ht;
 
-		u32_replace_hw_hnode(tp, ht, flags);
 		return 0;
 	}
 
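The early return added to u32_replace_hw_hnode() and u32_replace_hw_knode() above encodes the intended skip_sw/skip_hw semantics. The sketch below restates that decision; the comment table is a reading aid, not code from this patch:

	/*
	 * offloadable (tc_should_offload)?   skip_sw?   result
	 * ---------------------------------  --------   ------------------------------
	 * no                                 yes        -EINVAL, nowhere to run filter
	 * no                                 no         0, stay on the software path
	 * yes                                any        program the NIC; with skip_sw
	 *                                               the hardware return code is
	 *                                               what the caller sees
	 */
	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

In u32_change(), the new hash table is now offloaded with u32_replace_hw_hnode() before it is linked into tp_c->hlist, so a hardware failure can simply kfree(ht) and return instead of leaving a half-published node behind.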
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a63e879e8975..bf8af2c43c2c 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return err;
 }
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
 			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
 		if (cl->qdisc->ops->drop) {
 			len = cl->qdisc->ops->drop(cl->qdisc);
 			if (len > 0) {
+				sch->qstats.backlog -= len;
 				sch->q.qlen--;
 				if (cl->qdisc->q.qlen == 0)
 					list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
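The sch_drr hunks above, and the matching ones in sch_fq_codel, sch_generic, sch_hfsc, sch_prio, sch_red and sch_tbf below, all enforce the same invariant: every path that changes sch->q.qlen must adjust sch->qstats.backlog by the same packets. A condensed sketch of the four touch points as they are used in these hunks (the inc/dec helpers adjust the backlog by qdisc_pkt_len(skb)):

	/* enqueue */
	qdisc_qstats_backlog_inc(sch, skb);	/* backlog += qdisc_pkt_len(skb) */
	sch->q.qlen++;

	/* dequeue */
	qdisc_qstats_backlog_dec(sch, skb);	/* backlog -= qdisc_pkt_len(skb) */
	sch->q.qlen--;

	/* ->drop(): the child only reports a byte count */
	sch->qstats.backlog -= len;
	sch->q.qlen--;

	/* reset */
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;

Keeping the two counters in step is what makes the queue statistics and the qdisc_tree_reduce_backlog() arithmetic (see the sch_fq_codel hunks below) come out right.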
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a8971562..da250b2e06ae 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	unsigned int pkt_len;
 	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	prev_backlog = sch->qstats.backlog;
 	prev_qlen = sch->q.qlen;
 
+	/* save this packet length as it might be dropped by fq_codel_drop() */
+	pkt_len = qdisc_pkt_len(skb);
 	/* fq_codel_drop() is quite expensive, as it performs a linear search
 	 * in q->backlogs[] to find a fat flow.
 	 * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	q->drop_overlimit += prev_qlen;
 	if (memory_limited)
-		q->drop_overmemory += prev_qlen - sch->q.qlen;
-	/* As we dropped packet(s), better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-				  prev_backlog - sch->qstats.backlog);
+		q->drop_overmemory += prev_qlen;
 
-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	/* As we dropped packet(s), better let upper stack know this.
+	 * If we dropped a packet for this flow, return NET_XMIT_CN,
+	 * but in this case, our parents wont increase their backlogs.
+	 */
+	if (ret == idx) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - pkt_len);
+		return NET_XMIT_CN;
+	}
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+	return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
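The fq_codel_enqueue() rework above changes what is reported to parent qdiscs. After the batch drop, prev_qlen and prev_backlog hold the number of packets and bytes that fq_codel_drop() removed. When the trimmed flow is the one the new packet was queued to (ret == idx), the caller sees NET_XMIT_CN and never accounts the new packet in the parents, so that packet has to be left out of the reduction. Illustrative numbers only, assuming the batch removed 5 packets / 6000 bytes including our 1200-byte packet:

	/* prev_qlen = 5, prev_backlog = 6000, pkt_len = 1200, ret == idx */
	qdisc_tree_reduce_backlog(sch, prev_qlen - 1,		/* 4 packets  */
				  prev_backlog - pkt_len);	/* 4800 bytes */
	return NET_XMIT_CN;

If a different flow was trimmed (ret != idx), the parents did account the new packet, so the full deltas are reported and NET_XMIT_SUCCESS is returned.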
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 269dd71b3828..f9e0e9c03d0a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	qdisc_qstats_backlog_inc(q, skb);
 	q->q.qlen++; /* it's still part of the queue */
 	__netif_schedule(q);
 
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
+			qdisc_qstats_backlog_dec(q, skb);
 			q->q.qlen--;
 		} else
 			skb = NULL;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7cc3348..1ac9f9f03fe3 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	q->eligible = RB_ROOT;
 	INIT_LIST_HEAD(&q->droplist);
 	qdisc_watchdog_cancel(&q->watchdog);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	struct hfsc_sched *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
-	struct hfsc_class *cl;
-	unsigned int i;
-
-	sch->qstats.backlog = 0;
-	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-			sch->qstats.backlog += cl->qdisc->qstats.backlog;
-	}
 
 	qopt.defcls = q->defcls;
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
 
 	qdisc_unthrottled(sch);
 	qdisc_bstats_update(sch, skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
 			}
 			cl->qstats.drops++;
 			qdisc_qstats_drop(sch);
+			sch->qstats.backlog -= len;
 			sch->q.qlen--;
 			return len;
 		}
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 10adbc617905..8fe6999b642a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
 	return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+	return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
 					 unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 	.put = ingress_put,
 	.walk = ingress_walk,
 	.tcf_chain = ingress_find_tcf,
+	.tcf_cl_offload = ingress_cl_offload,
 	.bind_tcf = ingress_bind_filter,
 	.unbind_tcf = ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
 	}
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
 					unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 	.put = ingress_put,
 	.walk = ingress_walk,
 	.tcf_chain = clsact_find_tcf,
+	.tcf_cl_offload = clsact_cl_offload,
 	.bind_tcf = clsact_bind_filter,
 	.unbind_tcf = ingress_put,
 };
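The new tcf_cl_offload callback lets a classifier's offload check ask the qdisc whether the class a filter is attached to is an offloadable attach point: plain ingress always says yes, while clsact says yes only for its ingress class (TC_H_MIN_INGRESS), not for egress. The consumer side is not part of this diff; a hypothetical sketch of how tc_should_offload(dev, tp, flags), whose new tp argument shows up in the cls_flower and cls_u32 hunks above, could consult the op:

	/* hypothetical consumer-side check, not shown in this diff */
	const struct Qdisc_class_ops *cops = tp->q->ops->cl_ops;

	if (cops && cops->tcf_cl_offload && !cops->tcf_cl_offload(tp->classid))
		return false;	/* e.g. a clsact egress filter stays in software */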
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fee1b15506b2..4b0a82191bc4 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
 		if (skb) {
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
 	for (prio = q->bands-1; prio >= 0; prio--) {
 		qdisc = q->queues[prio];
 		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+			sch->qstats.backlog -= len;
 			sch->q.qlen--;
 			return len;
 		}
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8d2d8d953432..f18857febdad 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
 		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
 				     qdisc_pkt_len(skb));
-		if (err)
-			return err;
+		if (err) {
+			cl->qstats.drops++;
+			return qdisc_drop(skb, sch);
+		}
 	}
 
 	err = qdisc_enqueue(skb, cl->qdisc);
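In qfq_enqueue() above, a failed qfq_change_agg() now drops the packet instead of returning the raw error. At this point the skb has not been queued anywhere, so bubbling a plain error up would leak it. qdisc_drop() in this kernel generation is roughly the following (paraphrased for reference; see include/net/sch_generic.h), so the packet is freed and the drop is accounted:

	static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
	{
		kfree_skb(skb);
		qdisc_qstats_drop(sch);
		return NET_XMIT_DROP;
	}

The extra cl->qstats.drops++ keeps the per-class drop counter in step with the qdisc-level one.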
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c0e287..91578bdd378c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 	skb = child->dequeue(child);
 	if (skb) {
 		qdisc_bstats_update(sch, skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
 	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
 		q->stats.other++;
 		qdisc_qstats_drop(sch);
+		sch->qstats.backlog -= len;
 		sch->q.qlen--;
 		return len;
 	}
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 83b90b584fae..3161e491990b 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
 	unsigned int len = 0;
 
 	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+		sch->qstats.backlog -= len;
 		sch->q.qlen--;
 		qdisc_qstats_drop(sch);
 	}
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 			q->t_c = now;
 			q->tokens = toks;
 			q->ptokens = ptoks;
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			qdisc_unthrottled(sch);
 			qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;