aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/sch_htb.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--net/sched/sch_htb.c48
1 file changed, 27 insertions, 21 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9f949abcacef..f1acb0f60dc3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -103,7 +103,7 @@ struct htb_class {
103 u32 prio; /* these two are used only by leaves... */ 103 u32 prio; /* these two are used only by leaves... */
104 int quantum; /* but stored for parent-to-leaf return */ 104 int quantum; /* but stored for parent-to-leaf return */
105 105
106 struct tcf_proto *filter_list; /* class attached filters */ 106 struct tcf_proto __rcu *filter_list; /* class attached filters */
107 int filter_cnt; 107 int filter_cnt;
108 int refcnt; /* usage count of this class */ 108 int refcnt; /* usage count of this class */
109 109
@@ -153,7 +153,7 @@ struct htb_sched {
153 int rate2quantum; /* quant = rate / rate2quantum */ 153 int rate2quantum; /* quant = rate / rate2quantum */
154 154
155 /* filters for qdisc itself */ 155 /* filters for qdisc itself */
156 struct tcf_proto *filter_list; 156 struct tcf_proto __rcu *filter_list;
157 157
158#define HTB_WARN_TOOMANYEVENTS 0x1 158#define HTB_WARN_TOOMANYEVENTS 0x1
159 unsigned int warned; /* only one warning */ 159 unsigned int warned; /* only one warning */
@@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
223 if (cl->level == 0) 223 if (cl->level == 0)
224 return cl; 224 return cl;
225 /* Start with inner filter chain if a non-leaf class is selected */ 225 /* Start with inner filter chain if a non-leaf class is selected */
226 tcf = cl->filter_list; 226 tcf = rcu_dereference_bh(cl->filter_list);
227 } else { 227 } else {
228 tcf = q->filter_list; 228 tcf = rcu_dereference_bh(q->filter_list);
229 } 229 }
230 230
231 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 231 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
251 return cl; /* we hit leaf; return it */ 251 return cl; /* we hit leaf; return it */
252 252
253 /* we have got inner class; apply inner filter chain */ 253 /* we have got inner class; apply inner filter chain */
254 tcf = cl->filter_list; 254 tcf = rcu_dereference_bh(cl->filter_list);
255 } 255 }
256 /* classification failed; try to use default class */ 256 /* classification failed; try to use default class */
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); 257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
@@ -586,13 +586,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
586#ifdef CONFIG_NET_CLS_ACT 586#ifdef CONFIG_NET_CLS_ACT
587 } else if (!cl) { 587 } else if (!cl) {
588 if (ret & __NET_XMIT_BYPASS) 588 if (ret & __NET_XMIT_BYPASS)
589 sch->qstats.drops++; 589 qdisc_qstats_drop(sch);
590 kfree_skb(skb); 590 kfree_skb(skb);
591 return ret; 591 return ret;
592#endif 592#endif
593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { 593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
594 if (net_xmit_drop_count(ret)) { 594 if (net_xmit_drop_count(ret)) {
595 sch->qstats.drops++; 595 qdisc_qstats_drop(sch);
596 cl->qstats.drops++; 596 cl->qstats.drops++;
597 } 597 }
598 return ret; 598 return ret;
@@ -895,7 +895,7 @@ ok:
895 895
896 if (!sch->q.qlen) 896 if (!sch->q.qlen)
897 goto fin; 897 goto fin;
898 q->now = ktime_to_ns(ktime_get()); 898 q->now = ktime_get_ns();
899 start_at = jiffies; 899 start_at = jiffies;
900 900
901 next_event = q->now + 5LLU * NSEC_PER_SEC; 901 next_event = q->now + 5LLU * NSEC_PER_SEC;
@@ -925,14 +925,14 @@ ok:
925 goto ok; 925 goto ok;
926 } 926 }
927 } 927 }
928 sch->qstats.overlimits++; 928 qdisc_qstats_overlimit(sch);
929 if (likely(next_event > q->now)) { 929 if (likely(next_event > q->now)) {
930 if (!test_bit(__QDISC_STATE_DEACTIVATED, 930 if (!test_bit(__QDISC_STATE_DEACTIVATED,
931 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { 931 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
932 ktime_t time = ns_to_ktime(next_event); 932 ktime_t time = ns_to_ktime(next_event);
933 qdisc_throttled(q->watchdog.qdisc); 933 qdisc_throttled(q->watchdog.qdisc);
934 hrtimer_start(&q->watchdog.timer, time, 934 hrtimer_start(&q->watchdog.timer, time,
935 HRTIMER_MODE_ABS); 935 HRTIMER_MODE_ABS_PINNED);
936 } 936 }
937 } else { 937 } else {
938 schedule_work(&q->work); 938 schedule_work(&q->work);
@@ -1044,7 +1044,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1044 1044
1045 qdisc_watchdog_init(&q->watchdog, sch); 1045 qdisc_watchdog_init(&q->watchdog, sch);
1046 INIT_WORK(&q->work, htb_work_func); 1046 INIT_WORK(&q->work, htb_work_func);
1047 skb_queue_head_init(&q->direct_queue); 1047 __skb_queue_head_init(&q->direct_queue);
1048 1048
1049 if (tb[TCA_HTB_DIRECT_QLEN]) 1049 if (tb[TCA_HTB_DIRECT_QLEN])
1050 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); 1050 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
@@ -1138,15 +1138,16 @@ static int
1138htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) 1138htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1139{ 1139{
1140 struct htb_class *cl = (struct htb_class *)arg; 1140 struct htb_class *cl = (struct htb_class *)arg;
1141 __u32 qlen = 0;
1141 1142
1142 if (!cl->level && cl->un.leaf.q) 1143 if (!cl->level && cl->un.leaf.q)
1143 cl->qstats.qlen = cl->un.leaf.q->q.qlen; 1144 qlen = cl->un.leaf.q->q.qlen;
1144 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); 1145 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
1145 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); 1146 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
1146 1147
1147 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || 1148 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
1148 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || 1149 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1149 gnet_stats_copy_queue(d, &cl->qstats) < 0) 1150 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
1150 return -1; 1151 return -1;
1151 1152
1152 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); 1153 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1225,7 +1226,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1225 parent->un.leaf.q = new_q ? new_q : &noop_qdisc; 1226 parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1226 parent->tokens = parent->buffer; 1227 parent->tokens = parent->buffer;
1227 parent->ctokens = parent->cbuffer; 1228 parent->ctokens = parent->cbuffer;
1228 parent->t_c = ktime_to_ns(ktime_get()); 1229 parent->t_c = ktime_get_ns();
1229 parent->cmode = HTB_CAN_SEND; 1230 parent->cmode = HTB_CAN_SEND;
1230} 1231}
1231 1232
@@ -1402,7 +1403,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1402 goto failure; 1403 goto failure;
1403 1404
1404 if (htb_rate_est || tca[TCA_RATE]) { 1405 if (htb_rate_est || tca[TCA_RATE]) {
1405 err = gen_new_estimator(&cl->bstats, &cl->rate_est, 1406 err = gen_new_estimator(&cl->bstats, NULL,
1407 &cl->rate_est,
1406 qdisc_root_sleeping_lock(sch), 1408 qdisc_root_sleeping_lock(sch),
1407 tca[TCA_RATE] ? : &est.nla); 1409 tca[TCA_RATE] ? : &est.nla);
1408 if (err) { 1410 if (err) {
@@ -1455,7 +1457,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1455 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); 1457 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1456 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); 1458 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1457 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ 1459 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
1458 cl->t_c = ktime_to_ns(ktime_get()); 1460 cl->t_c = ktime_get_ns();
1459 cl->cmode = HTB_CAN_SEND; 1461 cl->cmode = HTB_CAN_SEND;
1460 1462
1461 /* attach to the hash list and parent's family */ 1463 /* attach to the hash list and parent's family */
@@ -1464,8 +1466,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1464 parent->children++; 1466 parent->children++;
1465 } else { 1467 } else {
1466 if (tca[TCA_RATE]) { 1468 if (tca[TCA_RATE]) {
1467 err = gen_replace_estimator(&cl->bstats, &cl->rate_est, 1469 spinlock_t *lock = qdisc_root_sleeping_lock(sch);
1468 qdisc_root_sleeping_lock(sch), 1470
1471 err = gen_replace_estimator(&cl->bstats, NULL,
1472 &cl->rate_est,
1473 lock,
1469 tca[TCA_RATE]); 1474 tca[TCA_RATE]);
1470 if (err) 1475 if (err)
1471 return err; 1476 return err;
@@ -1519,11 +1524,12 @@ failure:
1519 return err; 1524 return err;
1520} 1525}
1521 1526
1522static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) 1527static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
1528 unsigned long arg)
1523{ 1529{
1524 struct htb_sched *q = qdisc_priv(sch); 1530 struct htb_sched *q = qdisc_priv(sch);
1525 struct htb_class *cl = (struct htb_class *)arg; 1531 struct htb_class *cl = (struct htb_class *)arg;
1526 struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; 1532 struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
1527 1533
1528 return fl; 1534 return fl;
1529} 1535}