author      John Fastabend <john.fastabend@gmail.com>    2014-09-12 23:04:52 -0400
committer   David S. Miller <davem@davemloft.net>        2014-09-13 12:30:25 -0400
commit      46e5da40aec256155cfedee96dd21a75da941f2c (patch)
tree        cc3986c52025d252c2a063053692595e60c80e13 /net
parent      d1015645dd535bbf10e52a3ef6d02ee0c3e0b267 (diff)
net: qdisc: use rcu prefix and silence sparse warnings
Add __rcu annotation to qdisc handling; by doing this we can make
the smatch output more legible. And anyway, some of the cases should
be using rcu_dereference(); see qdisc_all_tx_empty(),
qdisc_tx_changing(), and so on.
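
For illustration, the annotation pattern looks roughly like the
following minimal sketch. The demo_queue/demo_qdisc names are
hypothetical stand-ins, not the real netdev_queue; only the accessors
and the __rcu marker are the real API:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_qdisc;

struct demo_queue {
        struct demo_qdisc __rcu *qdisc; /* __rcu lets sparse check accesses */
};

/* Fast path: caller must be inside rcu_read_lock()/rcu_read_unlock(). */
static struct demo_qdisc *demo_fast_path(struct demo_queue *q)
{
        return rcu_dereference(q->qdisc);
}

/* Config path: RTNL is held instead, so rtnl_dereference() applies. */
static struct demo_qdisc *demo_config_path(struct demo_queue *q)
{
        return rtnl_dereference(q->qdisc);
}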
Also, the *wake_queue() APIs are commonly called from driver timer
routines without the RCU read lock or RTNL lock held, so I added
rcu_read_lock() blocks around netif_wake_subqueue() and
netif_tx_wake_queue().
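
The calling context in question looks roughly like this sketch of a
hypothetical driver using the 2014-era timer callback signature; only
netif_wake_subqueue() is the real API:

#include <linux/netdevice.h>
#include <linux/timer.h>

struct demo_priv {
        struct net_device *netdev;
        struct timer_list tx_timer;
};

/* Runs in timer (softirq) context with neither RTNL nor
 * rcu_read_lock() held by the caller. */
static void demo_tx_timer(unsigned long data)
{
        struct demo_priv *priv = (struct demo_priv *)data;

        /* Safe: netif_wake_subqueue() now takes rcu_read_lock()
         * itself before dereferencing txq->qdisc. */
        netif_wake_subqueue(priv->netdev, 0);
}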
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c          | 51
-rw-r--r--  net/sched/sch_generic.c |  4
-rw-r--r--  net/sched/sch_mqprio.c  |  6
-rw-r--r--  net/sched/sch_teql.c    | 13
4 files changed, 63 insertions, 11 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 3c6a967e5830..b3d6dbc0c696 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2177,6 +2177,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 	return (struct dev_kfree_skb_cb *)skb->cb;
 }
 
+void netif_schedule_queue(struct netdev_queue *txq)
+{
+	rcu_read_lock();
+	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+		struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+		__netif_schedule(q);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(netif_schedule_queue);
+
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
+		struct Qdisc *q;
+
+		rcu_read_lock();
+		q = rcu_dereference(txq->qdisc);
+		__netif_schedule(q);
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(netif_wake_subqueue);
+
+void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
+		struct Qdisc *q;
+
+		rcu_read_lock();
+		q = rcu_dereference(dev_queue->qdisc);
+		__netif_schedule(q);
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(netif_tx_wake_queue);
+
 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	unsigned long flags;
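
A typical caller of the new netif_tx_wake_queue() is a TX-completion
path. A hedged sketch with hypothetical driver names; the point is
that the rcu_read_lock() around the qdisc dereference now lives
inside the helper itself:

#include <linux/netdevice.h>

/* Hypothetical NAPI TX-clean routine: once ring space has been
 * reclaimed, wake the queue without any caller-side RCU locking. */
static void demo_clean_tx_ring(struct net_device *dev, u16 qid)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qid);

        if (netif_tx_queue_stopped(txq))
                netif_tx_wake_queue(txq);
}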
@@ -3432,7 +3479,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-	q = rxq->qdisc;
+	q = rcu_dereference(rxq->qdisc);
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
@@ -3449,7 +3496,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 {
 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 
-	if (!rxq || rxq->qdisc == &noop_qdisc)
+	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
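
Note the accessor choice in the hunk above: rcu_access_pointer() is
sufficient when the pointer value is only compared and never
dereferenced, so no RCU read-side critical section is needed for the
test. A minimal sketch with a hypothetical helper name:

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Comparison only, no dereference: rcu_access_pointer() avoids the
 * lockdep check that rcu_dereference() would perform here. */
static bool demo_ingress_is_noop(struct netdev_queue *rxq)
{
        return rcu_access_pointer(rxq->qdisc) == &noop_qdisc;
}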
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 19696ebe9ebc..346ef85617d3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -783,7 +783,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 
-	qdisc = dev_queue->qdisc;
+	qdisc = rtnl_dereference(dev_queue->qdisc);
 	if (qdisc) {
 		spin_lock_bh(qdisc_lock(qdisc));
 
@@ -876,7 +876,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 {
 	struct Qdisc *qdisc = _qdisc;
 
-	dev_queue->qdisc = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, qdisc);
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
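
Both sch_generic.c hunks are control-path code running under RTNL,
which is why they read with rtnl_dereference() and publish with
rcu_assign_pointer(). The writer-side pattern, sketched with a
hypothetical helper:

#include <linux/rtnetlink.h>
#include <net/sch_generic.h>

/* Hypothetical qdisc swap under RTNL: readers that still hold the
 * old pointer stay valid until a grace period elapses. */
static struct Qdisc *demo_swap_qdisc(struct netdev_queue *txq,
                                     struct Qdisc *new)
{
        struct Qdisc *old;

        ASSERT_RTNL();
        old = rtnl_dereference(txq->qdisc);
        rcu_assign_pointer(txq->qdisc, new);
        return old;     /* caller frees only after synchronize_net() */
}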
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 6749e2f540d0..37e7d25d21f1 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -231,7 +231,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
 		spin_lock_bh(qdisc_lock(qdisc));
 		sch->q.qlen += qdisc->q.qlen;
 		sch->bstats.bytes += qdisc->bstats.bytes;
@@ -340,7 +340,9 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	spin_unlock_bh(d->lock);
 
 	for (i = tc.offset; i < tc.offset + tc.count; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+
+		qdisc = rtnl_dereference(q->qdisc);
 		spin_lock_bh(qdisc_lock(qdisc));
 		bstats.bytes += qdisc->bstats.bytes;
 		bstats.packets += qdisc->bstats.packets;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index aaa8d03ed054..5cd291bd00e4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -96,11 +96,14 @@ teql_dequeue(struct Qdisc *sch)
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct netdev_queue *dat_queue;
 	struct sk_buff *skb;
+	struct Qdisc *q;
 
 	skb = __skb_dequeue(&dat->q);
 	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
+	q = rcu_dereference_bh(dat_queue->qdisc);
+
 	if (skb == NULL) {
-		struct net_device *m = qdisc_dev(dat_queue->qdisc);
+		struct net_device *m = qdisc_dev(q);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
@@ -108,7 +111,7 @@ teql_dequeue(struct Qdisc *sch)
 	} else {
 		qdisc_bstats_update(sch, skb);
 	}
-	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
+	sch->q.qlen = dat->q.qlen + q->q.qlen;
 	return skb;
 }
 
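
The dequeue path above runs in BH context (qdiscs are dequeued from
the TX softirq with the qdisc lock held), so the BH flavor
rcu_dereference_bh() is the correct accessor: softirq context already
serves as the read-side critical section. The same flavor choice,
sketched with a hypothetical helper:

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Valid only where BH is disabled, e.g. inside the TX softirq;
 * rcu_dereference_bh() documents and lockdep-checks exactly that. */
static struct Qdisc *demo_peek_qdisc(struct netdev_queue *txq)
{
        return rcu_dereference_bh(txq->qdisc);
}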
@@ -157,9 +160,9 @@ teql_destroy(struct Qdisc *sch)
 	txq = netdev_get_tx_queue(master->dev, 0);
 	master->slaves = NULL;
 
-	root_lock = qdisc_root_sleeping_lock(txq->qdisc);
+	root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
 	spin_lock_bh(root_lock);
-	qdisc_reset(txq->qdisc);
+	qdisc_reset(rtnl_dereference(txq->qdisc));
 	spin_unlock_bh(root_lock);
 }
 }
@@ -266,7 +269,7 @@ static inline int teql_resolve(struct sk_buff *skb,
 	struct dst_entry *dst = skb_dst(skb);
 	int res;
 
-	if (txq->qdisc == &noop_qdisc)
+	if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
 		return -ENODEV;
 
 	if (!dev->header_ops || !dst)