author    John Fastabend <john.fastabend@gmail.com>  2014-09-12 23:04:52 -0400
committer David S. Miller <davem@davemloft.net>      2014-09-13 12:30:25 -0400
commit    46e5da40aec256155cfedee96dd21a75da941f2c
tree      cc3986c52025d252c2a063053692595e60c80e13
parent    d1015645dd535bbf10e52a3ef6d02ee0c3e0b267
net: qdisc: use rcu prefix and silence sparse warnings
Add __rcu annotation to qdisc handling; doing so makes the smatch
output more legible. Some of these cases should be using
rcu_dereference() anyway; see qdisc_all_tx_empty(),
qdisc_tx_changing(), and so on.

Also, the *wake_queue() APIs are commonly called from driver timer
routines without the RCU read lock or the RTNL lock held, so I added
rcu_read_lock() blocks around netif_wake_subqueue() and
netif_tx_wake_queue().
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
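
The discipline the patch enforces is the standard RCU split: readers
bracket the marked pointer with rcu_read_lock()/rcu_dereference(),
while the RTNL-serialized updater publishes with rcu_assign_pointer().
The same pattern can be mirrored in user space with liburcu, whose API
deliberately follows the kernel's. A minimal sketch, assuming liburcu
is installed (build with gcc sketch.c -lurcu); struct tx_queue and
struct qdisc here are illustrative stand-ins, not the kernel types:

/* Userspace sketch of the reader/updater split this patch enforces,
 * using liburcu to mirror the kernel RCU API. */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock(), rcu_dereference(), ... */

struct qdisc {
	int qlen;
};

struct tx_queue {
	struct qdisc *qdisc;	/* RCU-protected, like netdev_queue->qdisc */
};

static struct tx_queue txq;

/* Reader side: the shape netif_schedule_queue() now has.  The marked
 * rcu_dereference() is what sparse checks against the __rcu annotation. */
static int txq_empty(struct tx_queue *q)
{
	struct qdisc *qd;
	int empty;

	rcu_read_lock();
	qd = rcu_dereference(q->qdisc);
	empty = !qd || qd->qlen == 0;
	rcu_read_unlock();
	return empty;
}

/* Updater side: analogous to dev_init_scheduler_queue().  A single
 * updater is assumed here; the kernel serializes updaters with RTNL
 * and reads the pointer there with rtnl_dereference(). */
static void txq_attach(struct tx_queue *q, struct qdisc *qd)
{
	struct qdisc *old = q->qdisc;

	rcu_assign_pointer(q->qdisc, qd);	/* publish with a barrier */
	if (old) {
		synchronize_rcu();		/* wait out current readers */
		free(old);
	}
}

int main(void)
{
	rcu_register_thread();			/* every RCU-using thread */
	txq_attach(&txq, calloc(1, sizeof(struct qdisc)));
	printf("empty: %d\n", txq_empty(&txq));
	rcu_unregister_thread();
	return 0;
}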
 include/linux/netdevice.h | 29
 include/net/sch_generic.h | 21
 net/core/dev.c            | 51
 net/sched/sch_generic.c   |  4
 net/sched/sch_mqprio.c    |  6
 net/sched/sch_teql.c      | 13
 6 files changed, 82 insertions(+), 42 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ba72f6baae1a..ae721f53739e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -543,7 +543,7 @@ struct netdev_queue {
 	 * read mostly part
 	 */
 	struct net_device	*dev;
-	struct Qdisc		*qdisc;
+	struct Qdisc __rcu	*qdisc;
 	struct Qdisc		*qdisc_sleeping;
 #ifdef CONFIG_SYSFS
 	struct kobject		kobj;
@@ -2356,12 +2356,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 void __netif_schedule(struct Qdisc *q);
-
-static inline void netif_schedule_queue(struct netdev_queue *txq)
-{
-	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
-		__netif_schedule(txq->qdisc);
-}
+void netif_schedule_queue(struct netdev_queue *txq);
 
 static inline void netif_tx_schedule_all(struct net_device *dev)
 {
@@ -2397,11 +2392,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 	}
 }
 
-static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
-{
-	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
-		__netif_schedule(dev_queue->qdisc);
-}
+void netif_tx_wake_queue(struct netdev_queue *dev_queue);
 
 /**
  * netif_wake_queue - restart transmit
@@ -2673,19 +2664,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 }
 
-/**
- * netif_wake_subqueue - allow sending packets on subqueue
- * @dev: network device
- * @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
-		__netif_schedule(txq->qdisc);
-}
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
 
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a3cfb8ebeb53..56838ab29b42 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -259,7 +259,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
 
 static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
 {
-	return qdisc->dev_queue->qdisc;
+	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
+
+	return q;
 }
 
 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -384,7 +386,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 	struct Qdisc *qdisc;
 
 	for (; i < dev->num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
 		if (qdisc) {
 			spin_lock_bh(qdisc_lock(qdisc));
 			qdisc_reset(qdisc);
@@ -402,13 +404,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
 	unsigned int i;
+
+	rcu_read_lock();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		const struct Qdisc *q = txq->qdisc;
+		const struct Qdisc *q = rcu_dereference(txq->qdisc);
 
-		if (q->q.qlen)
+		if (q->q.qlen) {
+			rcu_read_unlock();
 			return false;
+		}
 	}
+	rcu_read_unlock();
 	return true;
 }
 
@@ -416,9 +423,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 static inline bool qdisc_tx_changing(const struct net_device *dev)
 {
 	unsigned int i;
+
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (txq->qdisc != txq->qdisc_sleeping)
+		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
 			return true;
 	}
 	return false;
@@ -428,9 +436,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
 static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 {
 	unsigned int i;
+
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (txq->qdisc != &noop_qdisc)
+		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
 			return false;
 	}
 	return true;
diff --git a/net/core/dev.c b/net/core/dev.c
index 3c6a967e5830..b3d6dbc0c696 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2177,6 +2177,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 	return (struct dev_kfree_skb_cb *)skb->cb;
 }
 
+void netif_schedule_queue(struct netdev_queue *txq)
+{
+	rcu_read_lock();
+	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+		struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+		__netif_schedule(q);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(netif_schedule_queue);
+
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
+		struct Qdisc *q;
+
+		rcu_read_lock();
+		q = rcu_dereference(txq->qdisc);
+		__netif_schedule(q);
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(netif_wake_subqueue);
+
+void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
+		struct Qdisc *q;
+
+		rcu_read_lock();
+		q = rcu_dereference(dev_queue->qdisc);
+		__netif_schedule(q);
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(netif_tx_wake_queue);
+
 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	unsigned long flags;
@@ -3432,7 +3479,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-	q = rxq->qdisc;
+	q = rcu_dereference(rxq->qdisc);
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
@@ -3449,7 +3496,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 {
 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 
-	if (!rxq || rxq->qdisc == &noop_qdisc)
+	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
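
The dev.c hunk above is the reason for the commit-message note about
driver timers: the wake helpers are now out of line and take the RCU
read lock themselves. A hypothetical driver fragment in the timer
idiom of the 2014-era kernel illustrates the calling context; the
mydrv_* names and fields are invented for illustration:

/* Hypothetical driver fragment (not a complete module). */
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct mydrv_priv {
	struct net_device *netdev;
	struct timer_list tx_timer;
	u16 txq_index;
};

static void mydrv_tx_timer(unsigned long data)
{
	struct mydrv_priv *priv = (struct mydrv_priv *)data;

	/* Runs in softirq context holding neither RTNL nor an RCU read
	 * lock.  This is safe only because netif_wake_subqueue() now
	 * takes rcu_read_lock() itself before dereferencing txq->qdisc. */
	netif_wake_subqueue(priv->netdev, priv->txq_index);
}

static void mydrv_arm_timer(struct mydrv_priv *priv)
{
	setup_timer(&priv->tx_timer, mydrv_tx_timer, (unsigned long)priv);
	mod_timer(&priv->tx_timer, jiffies + HZ / 10);
}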
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 19696ebe9ebc..346ef85617d3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -783,7 +783,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 
-	qdisc = dev_queue->qdisc;
+	qdisc = rtnl_dereference(dev_queue->qdisc);
 	if (qdisc) {
 		spin_lock_bh(qdisc_lock(qdisc));
 
@@ -876,7 +876,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 {
 	struct Qdisc *qdisc = _qdisc;
 
-	dev_queue->qdisc = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, qdisc);
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 6749e2f540d0..37e7d25d21f1 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -231,7 +231,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
 		spin_lock_bh(qdisc_lock(qdisc));
 		sch->q.qlen += qdisc->q.qlen;
 		sch->bstats.bytes += qdisc->bstats.bytes;
@@ -340,7 +340,9 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		spin_unlock_bh(d->lock);
 
 		for (i = tc.offset; i < tc.offset + tc.count; i++) {
-			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+
+			qdisc = rtnl_dereference(q->qdisc);
 			spin_lock_bh(qdisc_lock(qdisc));
 			bstats.bytes += qdisc->bstats.bytes;
 			bstats.packets += qdisc->bstats.packets;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index aaa8d03ed054..5cd291bd00e4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -96,11 +96,14 @@ teql_dequeue(struct Qdisc *sch)
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct netdev_queue *dat_queue;
 	struct sk_buff *skb;
+	struct Qdisc *q;
 
 	skb = __skb_dequeue(&dat->q);
 	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
+	q = rcu_dereference_bh(dat_queue->qdisc);
+
 	if (skb == NULL) {
-		struct net_device *m = qdisc_dev(dat_queue->qdisc);
+		struct net_device *m = qdisc_dev(q);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
@@ -108,7 +111,7 @@ teql_dequeue(struct Qdisc *sch)
 	} else {
 		qdisc_bstats_update(sch, skb);
 	}
-	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
+	sch->q.qlen = dat->q.qlen + q->q.qlen;
 	return skb;
 }
 
@@ -157,9 +160,9 @@ teql_destroy(struct Qdisc *sch)
 				txq = netdev_get_tx_queue(master->dev, 0);
 				master->slaves = NULL;
 
-				root_lock = qdisc_root_sleeping_lock(txq->qdisc);
+				root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
 				spin_lock_bh(root_lock);
-				qdisc_reset(txq->qdisc);
+				qdisc_reset(rtnl_dereference(txq->qdisc));
 				spin_unlock_bh(root_lock);
 			}
 		}
@@ -266,7 +269,7 @@ static inline int teql_resolve(struct sk_buff *skb,
 	struct dst_entry *dst = skb_dst(skb);
 	int res;
 
-	if (txq->qdisc == &noop_qdisc)
+	if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
 		return -ENODEV;
 
 	if (!dev->header_ops || !dst)