Diffstat (limited to 'net/sched/sch_api.c')

 -rw-r--r--  net/sched/sch_api.c | 65

 1 file changed, 47 insertions(+), 18 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 58bed7599db7..2cf61b3e633c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -578,31 +578,34 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						  timer);
 
+	rcu_read_lock();
 	qdisc_unthrottled(wd->qdisc);
 	__netif_schedule(qdisc_root(wd->qdisc));
+	rcu_read_unlock();
 
 	return HRTIMER_NORESTART;
 }
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 {
-	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	wd->timer.function = qdisc_watchdog;
 	wd->qdisc = qdisc;
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 {
 	if (test_bit(__QDISC_STATE_DEACTIVATED,
		      &qdisc_root_sleeping(wd->qdisc)->state))
 		return;
 
-	qdisc_throttled(wd->qdisc);
+	if (throttle)
+		qdisc_throttled(wd->qdisc);
 
 	hrtimer_start(&wd->timer,
		       ns_to_ktime(expires),
-		      HRTIMER_MODE_ABS);
+		      HRTIMER_MODE_ABS_PINNED);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 
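Note on the hunk above: qdisc_watchdog_schedule_ns() gains a third argument, so each caller now decides whether arming the watchdog also marks the qdisc throttled. A minimal caller-side sketch of the new signature, assuming a hypothetical qdisc that uses the watchdog (the example_* names are illustrative and not part of this patch):

#include <net/pkt_sched.h>

/* Hypothetical helpers called from a qdisc's dequeue path. */
static void example_delay_until(struct qdisc_watchdog *wd, u64 next_tx_ns)
{
	/* Old behaviour: arm the timer and set the throttled state. */
	qdisc_watchdog_schedule_ns(wd, next_tx_ns, true);
}

static void example_rearm_timer_only(struct qdisc_watchdog *wd, u64 next_tx_ns)
{
	/* New option: arm the timer without calling qdisc_throttled(). */
	qdisc_watchdog_schedule_ns(wd, next_tx_ns, false);
}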
@@ -763,7 +766,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 			cops->put(sch, cl);
 		}
 		sch->q.qlen -= n;
-		sch->qstats.drops += drops;
+		__qdisc_qstats_drop(sch, drops);
 	}
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
@@ -942,6 +945,17 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 	sch->handle = handle;
 
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
+		if (qdisc_is_percpu_stats(sch)) {
+			sch->cpu_bstats =
+				alloc_percpu(struct gnet_stats_basic_cpu);
+			if (!sch->cpu_bstats)
+				goto err_out4;
+
+			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+			if (!sch->cpu_qstats)
+				goto err_out4;
+		}
+
 		if (tca[TCA_STAB]) {
 			stab = qdisc_get_stab(tca[TCA_STAB]);
 			if (IS_ERR(stab)) {
@@ -964,8 +978,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			else
 				root_lock = qdisc_lock(sch);
 
-			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-						root_lock, tca[TCA_RATE]);
+			err = gen_new_estimator(&sch->bstats,
+						sch->cpu_bstats,
+						&sch->rate_est,
+						root_lock,
+						tca[TCA_RATE]);
 			if (err)
 				goto err_out4;
 		}
@@ -984,6 +1001,8 @@ err_out:
 	return NULL;
 
 err_out4:
+	free_percpu(sch->cpu_bstats);
+	free_percpu(sch->cpu_qstats);
 	/*
 	 * Any broken qdiscs that would require a ops->reset() here?
 	 * The qdisc was never in action so it shouldn't be necessary.
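The shared err_out4 label above frees both per-cpu pointers unconditionally; this works because the qdisc is zero-allocated and free_percpu() is a no-op on NULL, so the same label is safe whether the allocations never ran, only the first one succeeded, or a later step (stab lookup, rate estimator) failed. A minimal sketch of that unwind pattern, with illustrative example_* names:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <net/gen_stats.h>

struct example_stats {
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
};

static int example_alloc_stats(struct example_stats *s)
{
	/* The real qdisc arrives kzalloc()ed; make that explicit here. */
	s->cpu_bstats = NULL;
	s->cpu_qstats = NULL;

	s->cpu_bstats = alloc_percpu(struct gnet_stats_basic_cpu);
	if (!s->cpu_bstats)
		goto err;

	s->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
	if (!s->cpu_qstats)
		goto err;

	return 0;
err:
	free_percpu(s->cpu_bstats);	/* free_percpu(NULL) is a no-op */
	free_percpu(s->cpu_qstats);
	return -ENOMEM;
}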
@@ -1022,9 +1041,11 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 		   because change can't be undone. */
 		if (sch->flags & TCQ_F_MQROOT)
 			goto out;
-		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_sleeping_lock(sch),
-				      tca[TCA_RATE]);
+		gen_replace_estimator(&sch->bstats,
+				      sch->cpu_bstats,
+				      &sch->rate_est,
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
 	}
 out:
 	return 0;
@@ -1299,11 +1320,14 @@ graft:
 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			  u32 portid, u32 seq, u16 flags, int event)
 {
+	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 	struct tcmsg *tcm;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
+	__u32 qlen;
 
 	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
@@ -1321,7 +1345,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 		goto nla_put_failure;
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
-	q->qstats.qlen = q->q.qlen;
+	qlen = q->q.qlen;
 
 	stab = rtnl_dereference(q->stab);
 	if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -1334,9 +1358,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
 		goto nla_put_failure;
 
-	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
+	if (qdisc_is_percpu_stats(q)) {
+		cpu_bstats = q->cpu_bstats;
+		cpu_qstats = q->cpu_qstats;
+	}
+
+	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
+	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
 		goto nla_put_failure;
 
 	if (gnet_stats_finish_copy(&d) < 0)
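With per-cpu counters in place, the dump path above hands the per-cpu pointers to the gnet_stats helpers instead of reading q->bstats/q->qstats directly, and passes the qlen snapshot separately since it no longer lives in q->qstats. Aggregation then amounts to summing each CPU's copy; a rough sketch of that idea only (illustrative, not the helper's actual code):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/gen_stats.h>

/* Sum every CPU's copy of the queue stats into *sum; the dump path does
 * something similar inside gnet_stats_copy_queue() when it is given a
 * per-cpu pointer.
 */
static void example_sum_qstats(struct gnet_stats_queue __percpu *cpu,
			       struct gnet_stats_queue *sum)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_queue *q = per_cpu_ptr(cpu, i);

		sum->backlog	+= q->backlog;
		sum->drops	+= q->drops;
		sum->requeues	+= q->requeues;
		sum->overlimits	+= q->overlimits;
	}
}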
@@ -1781,7 +1810,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
 	__be16 protocol = skb->protocol;
 	int err;
 
-	for (; tp; tp = tp->next) {
+	for (; tp; tp = rcu_dereference_bh(tp->next)) {
 		if (tp->protocol != protocol &&
 		    tp->protocol != htons(ETH_P_ALL))
 			continue;
@@ -1833,15 +1862,15 @@ void tcf_destroy(struct tcf_proto *tp)
 {
 	tp->ops->destroy(tp);
 	module_put(tp->ops->owner);
-	kfree(tp);
+	kfree_rcu(tp, rcu);
 }
 
-void tcf_destroy_chain(struct tcf_proto **fl)
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
 {
 	struct tcf_proto *tp;
 
-	while ((tp = *fl) != NULL) {
-		*fl = tp->next;
+	while ((tp = rtnl_dereference(*fl)) != NULL) {
+		RCU_INIT_POINTER(*fl, tp->next);
 		tcf_destroy(tp);
 	}
 }
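The classifier-chain hunks above move the tcf_proto list under RCU: the fast path (tc_classify_compat) follows tp->next with rcu_dereference_bh(), while control-path teardown (tcf_destroy_chain) runs under RTNL, unlinks entries with rtnl_dereference()/RCU_INIT_POINTER(), and defers the free past a grace period with kfree_rcu(). A minimal sketch of that pattern on a stand-alone list, assuming the example_* names are illustrative and not kernel API:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct example_node {
	struct example_node __rcu *next;
	struct rcu_head rcu;
};

/* Reader side (softirq context, mirroring the classify loop). */
static int example_walk(struct example_node __rcu **head)
{
	struct example_node *n;
	int count = 0;

	rcu_read_lock_bh();
	for (n = rcu_dereference_bh(*head); n;
	     n = rcu_dereference_bh(n->next))
		count++;	/* e.g. match the packet against n here */
	rcu_read_unlock_bh();

	return count;
}

/* Writer side (RTNL held), mirroring tcf_destroy_chain() above. */
static void example_destroy_chain(struct example_node __rcu **head)
{
	struct example_node *n;

	while ((n = rtnl_dereference(*head)) != NULL) {
		RCU_INIT_POINTER(*head, n->next);
		kfree_rcu(n, rcu);	/* freed after the grace period */
	}
}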