Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/cls_api.c      |  2
-rw-r--r--  net/sched/cls_route.c    |  2
-rw-r--r--  net/sched/sch_api.c      | 26
-rw-r--r--  net/sched/sch_cbq.c      |  6
-rw-r--r--  net/sched/sch_generic.c  |  4
-rw-r--r--  net/sched/sch_hfsc.c     |  4
-rw-r--r--  net/sched/sch_htb.c      |  8
-rw-r--r--  net/sched/sch_netem.c    |  2
-rw-r--r--  net/sched/sch_teql.c     |  2
9 files changed, 32 insertions, 24 deletions
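
For reference, a minimal sketch of the two locking helpers this patch switches between (assumed to match include/net/sch_generic.h at this point in the tree; simplified, not the authoritative definitions). qdisc_root_lock() takes the lock of dev_queue->qdisc, the active root qdisc, which is swapped to &noop_qdisc while the device is deactivated; qdisc_root_sleeping_lock() takes the lock of dev_queue->qdisc_sleeping, which stays stable under RTNL, so configuration paths (estimators, dumps, fastmap resets) keep serializing on one consistent lock.

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

/* Lock of the currently attached root qdisc; while the device is
 * being deactivated this may be noop_qdisc's lock. */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc->dev_queue->qdisc;

	ASSERT_RTNL();
	return qdisc_lock(root);
}

/* Lock of the "sleeping" root qdisc; stable across
 * dev_deactivate()/dev_activate(). */
static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc->dev_queue->qdisc_sleeping;

	ASSERT_RTNL();
	return qdisc_lock(root);
}
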
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5cafdd4c8018..8eb79e92e94c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -205,7 +205,7 @@ replay:
 		}
 	}
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_root_sleeping_lock(q);
 
 	if (tp == NULL) {
 		/* Proto-tcf does not exist, create new one */
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 481260a4f10f..e3d8455eebc2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
-	spinlock_t *root_lock = qdisc_root_lock(q);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
 	spin_lock_bh(root_lock);
 	memset(head->fastmap, 0, sizeof(head->fastmap));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e7fb9e0d21b4..1122c952aa99 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -624,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
 	spinlock_t *root_lock;
 
-	root_lock = qdisc_root_lock(oqdisc);
+	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
 	/* Prune old scheduler */
@@ -635,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
-	dev_queue->qdisc = &noop_qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
 
 	spin_unlock_bh(root_lock);
 
@@ -830,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		sch->stab = stab;
 	}
 	if (tca[TCA_RATE]) {
+		spinlock_t *root_lock;
+
+		if ((sch->parent != TC_H_ROOT) &&
+		    !(sch->flags & TCQ_F_INGRESS))
+			root_lock = qdisc_root_sleeping_lock(sch);
+		else
+			root_lock = qdisc_lock(sch);
+
 		err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-					qdisc_root_lock(sch),
-					tca[TCA_RATE]);
+					root_lock, tca[TCA_RATE]);
 		if (err) {
 			/*
 			 * Any broken qdiscs that would require
@@ -884,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 
 	if (tca[TCA_RATE])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_lock(sch), tca[TCA_RATE]);
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
 	return 0;
 }
 
@@ -1161,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
 		goto nla_put_failure;
 
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1453,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
 		goto nla_put_failure;
 
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 8fa90d68ec6d..8b06fa900482 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1754,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
-		spinlock_t *root_lock = qdisc_root_lock(sch);
+		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(root_lock);
@@ -1839,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1930,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-			qdisc_root_lock(sch), tca[TCA_RATE]);
+			qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5f0ade7806a7..9634091ee2f0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -634,7 +634,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 		if (!(qdisc->flags & TCQ_F_BUILTIN))
 			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
 
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		qdisc_reset(qdisc);
 
 		spin_unlock_bh(qdisc_lock(qdisc));
@@ -709,7 +709,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
 		qdisc_destroy(qdisc);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c2b8d9cce3d2..c1e77da8cd09 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0df0df202ed0..d14f02056ae6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
@@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 			  struct sk_buff *skb, struct tcmsg *tcm)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
@@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch),
+				  qdisc_root_sleeping_lock(sch),
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb0294d0b55e..3781e55046d0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	root_lock = qdisc_root_lock(sch);
+	root_lock = qdisc_root_sleeping_lock(sch);
 
 	spin_lock_bh(root_lock);
 	d = xchg(&q->delay_dist, d);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 2c35c678563b..d35ef059abb1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch)
 			txq = netdev_get_tx_queue(master->dev, 0);
 			master->slaves = NULL;
 
-			root_lock = qdisc_root_lock(txq->qdisc);
+			root_lock = qdisc_root_sleeping_lock(txq->qdisc);
 			spin_lock_bh(root_lock);
 			qdisc_reset(txq->qdisc);
 			spin_unlock_bh(root_lock);