diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-01-20 00:27:16 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-01-20 19:59:32 -0500 |
commit | fd245a4adb5288eac37250875f237c40a20a1944 (patch) | |
tree | 1c16670c53dab9d9d05b26a7e7ae8a6a8267e847 | |
parent | 817fb15dfd988d8dda916ee04fa506f0c466b9d6 (diff) |
net_sched: move TCQ_F_THROTTLED flag
In commit 371121057607e (net: QDISC_STATE_RUNNING dont need atomic bit
ops) I moved QDISC_STATE_RUNNING flag to __state container, located in
the cache line containing qdisc lock and often dirtied fields.
I now move the TCQ_F_THROTTLED bit too, so that the first cache line is read-
mostly and can be shared by all cpus. This should speed up HTB/CBQ, for example.
Not using test_bit()/__clear_bit()/__test_and_set_bit() allows us to use an
"unsigned int" for the __state container, reducing Qdisc size by 8 bytes.
Introduce helpers to hide implementation details.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Patrick McHardy <kaber@trash.net>
CC: Jesper Dangaard Brouer <hawk@diku.dk>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: Jamal Hadi Salim <hadi@cyberus.ca>
CC: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/net/sch_generic.h | 38 | ||||
-rw-r--r-- | net/sched/sch_api.c | 6 | ||||
-rw-r--r-- | net/sched/sch_cbq.c | 6 | ||||
-rw-r--r-- | net/sched/sch_hfsc.c | 2 | ||||
-rw-r--r-- | net/sched/sch_htb.c | 4 | ||||
-rw-r--r-- | net/sched/sch_netem.c | 2 | ||||
-rw-r--r-- | net/sched/sch_tbf.c | 2 |
7 files changed, 39 insertions, 21 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e9eee99d8b1..f6345f55041 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -31,7 +31,8 @@ enum qdisc_state_t { | |||
31 | * following bits are only changed while qdisc lock is held | 31 | * following bits are only changed while qdisc lock is held |
32 | */ | 32 | */ |
33 | enum qdisc___state_t { | 33 | enum qdisc___state_t { |
34 | __QDISC___STATE_RUNNING, | 34 | __QDISC___STATE_RUNNING = 1, |
35 | __QDISC___STATE_THROTTLED = 2, | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct qdisc_size_table { | 38 | struct qdisc_size_table { |
@@ -46,10 +47,9 @@ struct Qdisc { | |||
46 | struct sk_buff * (*dequeue)(struct Qdisc *dev); | 47 | struct sk_buff * (*dequeue)(struct Qdisc *dev); |
47 | unsigned flags; | 48 | unsigned flags; |
48 | #define TCQ_F_BUILTIN 1 | 49 | #define TCQ_F_BUILTIN 1 |
49 | #define TCQ_F_THROTTLED 2 | 50 | #define TCQ_F_INGRESS 2 |
50 | #define TCQ_F_INGRESS 4 | 51 | #define TCQ_F_CAN_BYPASS 4 |
51 | #define TCQ_F_CAN_BYPASS 8 | 52 | #define TCQ_F_MQROOT 8 |
52 | #define TCQ_F_MQROOT 16 | ||
53 | #define TCQ_F_WARN_NONWC (1 << 16) | 53 | #define TCQ_F_WARN_NONWC (1 << 16) |
54 | int padded; | 54 | int padded; |
55 | struct Qdisc_ops *ops; | 55 | struct Qdisc_ops *ops; |
@@ -78,25 +78,43 @@ struct Qdisc { | |||
78 | unsigned long state; | 78 | unsigned long state; |
79 | struct sk_buff_head q; | 79 | struct sk_buff_head q; |
80 | struct gnet_stats_basic_packed bstats; | 80 | struct gnet_stats_basic_packed bstats; |
81 | unsigned long __state; | 81 | unsigned int __state; |
82 | struct gnet_stats_queue qstats; | 82 | struct gnet_stats_queue qstats; |
83 | struct rcu_head rcu_head; | 83 | struct rcu_head rcu_head; |
84 | spinlock_t busylock; | 84 | spinlock_t busylock; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static inline bool qdisc_is_running(struct Qdisc *qdisc) | 87 | static inline bool qdisc_is_running(const struct Qdisc *qdisc) |
88 | { | 88 | { |
89 | return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | 89 | return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false; |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline bool qdisc_run_begin(struct Qdisc *qdisc) | 92 | static inline bool qdisc_run_begin(struct Qdisc *qdisc) |
93 | { | 93 | { |
94 | return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | 94 | if (qdisc_is_running(qdisc)) |
95 | return false; | ||
96 | qdisc->__state |= __QDISC___STATE_RUNNING; | ||
97 | return true; | ||
95 | } | 98 | } |
96 | 99 | ||
97 | static inline void qdisc_run_end(struct Qdisc *qdisc) | 100 | static inline void qdisc_run_end(struct Qdisc *qdisc) |
98 | { | 101 | { |
99 | __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | 102 | qdisc->__state &= ~__QDISC___STATE_RUNNING; |
103 | } | ||
104 | |||
105 | static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) | ||
106 | { | ||
107 | return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false; | ||
108 | } | ||
109 | |||
110 | static inline void qdisc_throttled(struct Qdisc *qdisc) | ||
111 | { | ||
112 | qdisc->__state |= __QDISC___STATE_THROTTLED; | ||
113 | } | ||
114 | |||
115 | static inline void qdisc_unthrottled(struct Qdisc *qdisc) | ||
116 | { | ||
117 | qdisc->__state &= ~__QDISC___STATE_THROTTLED; | ||
100 | } | 118 | } |
101 | 119 | ||
102 | struct Qdisc_class_ops { | 120 | struct Qdisc_class_ops { |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 36ac0ec81ce..374fcbef80e 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -473,7 +473,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) | |||
473 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, | 473 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, |
474 | timer); | 474 | timer); |
475 | 475 | ||
476 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; | 476 | qdisc_unthrottled(wd->qdisc); |
477 | __netif_schedule(qdisc_root(wd->qdisc)); | 477 | __netif_schedule(qdisc_root(wd->qdisc)); |
478 | 478 | ||
479 | return HRTIMER_NORESTART; | 479 | return HRTIMER_NORESTART; |
@@ -495,7 +495,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) | |||
495 | &qdisc_root_sleeping(wd->qdisc)->state)) | 495 | &qdisc_root_sleeping(wd->qdisc)->state)) |
496 | return; | 496 | return; |
497 | 497 | ||
498 | wd->qdisc->flags |= TCQ_F_THROTTLED; | 498 | qdisc_throttled(wd->qdisc); |
499 | time = ktime_set(0, 0); | 499 | time = ktime_set(0, 0); |
500 | time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); | 500 | time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); |
501 | hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); | 501 | hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); |
@@ -505,7 +505,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule); | |||
505 | void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) | 505 | void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) |
506 | { | 506 | { |
507 | hrtimer_cancel(&wd->timer); | 507 | hrtimer_cancel(&wd->timer); |
508 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; | 508 | qdisc_unthrottled(wd->qdisc); |
509 | } | 509 | } |
510 | EXPORT_SYMBOL(qdisc_watchdog_cancel); | 510 | EXPORT_SYMBOL(qdisc_watchdog_cancel); |
511 | 511 | ||
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 4aaf44c95c5..25ed522b289 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -351,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) | |||
351 | { | 351 | { |
352 | int toplevel = q->toplevel; | 352 | int toplevel = q->toplevel; |
353 | 353 | ||
354 | if (toplevel > cl->level && !(cl->q->flags & TCQ_F_THROTTLED)) { | 354 | if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { |
355 | psched_time_t now; | 355 | psched_time_t now; |
356 | psched_tdiff_t incr; | 356 | psched_tdiff_t incr; |
357 | 357 | ||
@@ -625,7 +625,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) | |||
625 | hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); | 625 | hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); |
626 | } | 626 | } |
627 | 627 | ||
628 | sch->flags &= ~TCQ_F_THROTTLED; | 628 | qdisc_unthrottled(sch); |
629 | __netif_schedule(qdisc_root(sch)); | 629 | __netif_schedule(qdisc_root(sch)); |
630 | return HRTIMER_NORESTART; | 630 | return HRTIMER_NORESTART; |
631 | } | 631 | } |
@@ -974,7 +974,7 @@ cbq_dequeue(struct Qdisc *sch) | |||
974 | skb = cbq_dequeue_1(sch); | 974 | skb = cbq_dequeue_1(sch); |
975 | if (skb) { | 975 | if (skb) { |
976 | sch->q.qlen--; | 976 | sch->q.qlen--; |
977 | sch->flags &= ~TCQ_F_THROTTLED; | 977 | qdisc_unthrottled(sch); |
978 | return skb; | 978 | return skb; |
979 | } | 979 | } |
980 | 980 | ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index dea4009615f..b632d925191 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1664,7 +1664,7 @@ hfsc_dequeue(struct Qdisc *sch) | |||
1664 | set_passive(cl); | 1664 | set_passive(cl); |
1665 | } | 1665 | } |
1666 | 1666 | ||
1667 | sch->flags &= ~TCQ_F_THROTTLED; | 1667 | qdisc_unthrottled(sch); |
1668 | sch->q.qlen--; | 1668 | sch->q.qlen--; |
1669 | 1669 | ||
1670 | return skb; | 1670 | return skb; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 3e86fd3a1b7..39db75cd8c1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -865,7 +865,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) | |||
865 | /* try to dequeue direct packets as high prio (!) to minimize cpu work */ | 865 | /* try to dequeue direct packets as high prio (!) to minimize cpu work */ |
866 | skb = __skb_dequeue(&q->direct_queue); | 866 | skb = __skb_dequeue(&q->direct_queue); |
867 | if (skb != NULL) { | 867 | if (skb != NULL) { |
868 | sch->flags &= ~TCQ_F_THROTTLED; | 868 | qdisc_unthrottled(sch); |
869 | sch->q.qlen--; | 869 | sch->q.qlen--; |
870 | return skb; | 870 | return skb; |
871 | } | 871 | } |
@@ -901,7 +901,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) | |||
901 | skb = htb_dequeue_tree(q, prio, level); | 901 | skb = htb_dequeue_tree(q, prio, level); |
902 | if (likely(skb != NULL)) { | 902 | if (likely(skb != NULL)) { |
903 | sch->q.qlen--; | 903 | sch->q.qlen--; |
904 | sch->flags &= ~TCQ_F_THROTTLED; | 904 | qdisc_unthrottled(sch); |
905 | goto fin; | 905 | goto fin; |
906 | } | 906 | } |
907 | } | 907 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index c2bbbe60d54..c26ef3614f7 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -266,7 +266,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) | |||
266 | struct netem_sched_data *q = qdisc_priv(sch); | 266 | struct netem_sched_data *q = qdisc_priv(sch); |
267 | struct sk_buff *skb; | 267 | struct sk_buff *skb; |
268 | 268 | ||
269 | if (sch->flags & TCQ_F_THROTTLED) | 269 | if (qdisc_is_throttled(sch)) |
270 | return NULL; | 270 | return NULL; |
271 | 271 | ||
272 | skb = q->qdisc->ops->peek(q->qdisc); | 272 | skb = q->qdisc->ops->peek(q->qdisc); |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 475edfb69c2..86c01669697 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -185,7 +185,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) | |||
185 | q->tokens = toks; | 185 | q->tokens = toks; |
186 | q->ptokens = ptoks; | 186 | q->ptokens = ptoks; |
187 | sch->q.qlen--; | 187 | sch->q.qlen--; |
188 | sch->flags &= ~TCQ_F_THROTTLED; | 188 | qdisc_unthrottled(sch); |
189 | return skb; | 189 | return skb; |
190 | } | 190 | } |
191 | 191 | ||