author	Paolo Abeni <pabeni@redhat.com>	2019-04-10 08:32:41 -0400
committer	David S. Miller <davem@davemloft.net>	2019-04-10 15:20:46 -0400
commit	73eb628ddfd3884d1e58a8022de2e78de7807fc6
tree	f9963afb03df6761396729cfad94621e7d3d5ab0
parent	8a53e616de294873fec1a75ddb77ecb3d225cee0
Revert: "net: sched: put back q.qlen into a single location"
This reverts commit 46b1c18f9deb ("net: sched: put back q.qlen into a single location").

After the previous patch, when a NOLOCK qdisc is enslaved to a locking qdisc it switches to global stats accounting. As a consequence, when a classful qdisc directly accesses a child qdisc's qlen, that child is not doing per-CPU accounting and its qlen value is consistent.

In the control path nobody accesses qlen directly since commit e5f0e8f8e45 ("net: sched: introduce and use qdisc tree flush/purge helpers"), so we can remove the contended atomic ops from the datapath.

v1 -> v2:
 - complete the qdisc_qstats_atomic_qlen_dec() -> qdisc_qstats_cpu_qlen_dec() replacement, fix build issue
 - more descriptive commit message

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
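The datapath cost at stake can be seen by contrasting the two enqueue-side accounting patterns: the reverted scheme bumps one shared atomic from every CPU, while the per-CPU scheme restored here only touches CPU-local memory and is aggregated on demand. A minimal sketch of the two patterns (helper names are illustrative stand-ins for the ones changed in the diff below, kernel context assumed):

  /* Reverted scheme: every CPU increments the same counter, so the
   * enqueue/dequeue fast path contends on a single cacheline.
   */
  static inline void qlen_inc_shared(struct Qdisc *sch)
  {
  	atomic_inc(&sch->q.atomic_qlen);
  }

  /* Restored scheme: each CPU bumps its own gnet_stats_queue::qlen;
   * no cross-CPU atomic is needed on the fast path, and readers
   * aggregate the per-CPU values only when they need a total.
   */
  static inline void qlen_inc_percpu(struct Qdisc *sch)
  {
  	this_cpu_inc(sch->cpu_qstats->qlen);
  }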
-rw-r--r--	include/net/sch_generic.h	37
-rw-r--r--	net/core/gen_stats.c	2
-rw-r--r--	net/sched/sch_generic.c	9
3 files changed, 28 insertions(+), 20 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f069011524ba..e8f85cd2afce 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,10 +52,7 @@ struct qdisc_size_table {
 struct qdisc_skb_head {
 	struct sk_buff	*head;
 	struct sk_buff	*tail;
-	union {
-		u32		qlen;
-		atomic_t	atomic_qlen;
-	};
+	__u32		qlen;
 	spinlock_t	lock;
 };
 
@@ -486,19 +483,27 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+	return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
 {
-	u32 qlen = q->qstats.qlen;
+	__u32 qlen = q->qstats.qlen;
+	int i;
 
-	if (qdisc_is_percpu_stats(q))
-		qlen += atomic_read(&q->q.atomic_qlen);
-	else
+	if (qdisc_is_percpu_stats(q)) {
+		for_each_possible_cpu(i)
+			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+	} else {
 		qlen += q->q.qlen;
+	}
 
 	return qlen;
 }
@@ -889,14 +894,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 {
-	atomic_inc(&sch->q.atomic_qlen);
+	this_cpu_inc(sch->cpu_qstats->qlen);
 }
 
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 {
-	atomic_dec(&sch->q.atomic_qlen);
+	this_cpu_dec(sch->cpu_qstats->qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -1112,7 +1117,7 @@ static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
 	if (qdisc_is_percpu_stats(sch)) {
 		qdisc_qstats_cpu_backlog_dec(sch, skb);
 		qdisc_bstats_cpu_update(sch, skb);
-		qdisc_qstats_atomic_qlen_dec(sch);
+		qdisc_qstats_cpu_qlen_dec(sch);
 	} else {
 		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_bstats_update(sch, skb);
@@ -1124,7 +1129,7 @@ static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
 					  unsigned int pkt_len)
 {
 	if (qdisc_is_percpu_stats(sch)) {
-		qdisc_qstats_atomic_qlen_inc(sch);
+		qdisc_qstats_cpu_qlen_inc(sch);
 		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
 	} else {
 		sch->qstats.backlog += pkt_len;
@@ -1141,7 +1146,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 		skb = __skb_dequeue(&sch->gso_skb);
 		if (qdisc_is_percpu_stats(sch)) {
 			qdisc_qstats_cpu_backlog_dec(sch, skb);
-			qdisc_qstats_atomic_qlen_dec(sch);
+			qdisc_qstats_cpu_qlen_dec(sch);
 		} else {
 			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
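With the union in struct qdisc_skb_head gone, anything in the control path that wants a total queue length goes through qdisc_qlen_sum(), which folds the per-CPU counters back into one value; the datapath itself never has to. A minimal usage sketch (the caller is hypothetical, kernel context assumed):

  /* Hypothetical control-path check: true when the qdisc holds no
   * packets, whether it does per-CPU or global stats accounting.
   * qdisc_qlen_sum() dispatches on qdisc_is_percpu_stats() itself.
   */
  static bool example_qdisc_empty(const struct Qdisc *q)
  {
  	return qdisc_qlen_sum(q) == 0;
  }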
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index ac679f74ba47..9bf1b9ad1780 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -291,6 +291,7 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
 	for_each_possible_cpu(i) {
 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
+		qstats->qlen = 0;
 		qstats->backlog += qcpu->backlog;
 		qstats->drops += qcpu->drops;
 		qstats->requeues += qcpu->requeues;
@@ -306,6 +307,7 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
 	} else {
+		qstats->qlen = q->qlen;
 		qstats->backlog = q->backlog;
 		qstats->drops = q->drops;
 		qstats->requeues = q->requeues;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 12a6e1a39fa0..848aab3693bd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 		skb = __skb_dequeue(&q->skb_bad_txq);
 		if (qdisc_is_percpu_stats(q)) {
 			qdisc_qstats_cpu_backlog_dec(q, skb);
-			qdisc_qstats_atomic_qlen_dec(q);
+			qdisc_qstats_cpu_qlen_dec(q);
 		} else {
 			qdisc_qstats_backlog_dec(q, skb);
 			q->q.qlen--;
@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		qdisc_qstats_cpu_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -136,7 +136,7 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_requeues_inc(q);
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		qdisc_qstats_cpu_qlen_inc(q);
 	} else {
 		q->qstats.requeues++;
 		qdisc_qstats_backlog_inc(q, skb);
@@ -236,7 +236,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_atomic_qlen_dec(q);
+				qdisc_qstats_cpu_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -694,6 +694,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
 		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
 
 		q->backlog = 0;
+		q->qlen = 0;
 	}
 }
 