author	Eric Dumazet <edumazet@google.com>	2016-06-22 02:16:50 -0400
committer	David S. Miller <davem@davemloft.net>	2016-06-25 12:19:35 -0400
commit	008830bc321c0fc22c0db8d5b0b56f854ed90a5c (patch)
tree	99d4bfa6d32a42e48b598223a5443eb3443ca532
parent	520ac30f45519b0a82dd92117c181d1d6144677b (diff)
net_sched: fq_codel: cache skb->truesize into skb->cb
Now that we defer skb drops, it makes sense to keep a copy of skb->truesize
in struct codel_skb_cb, to avoid one cache line miss per dropped skb in
fq_codel_drop() and reduce latencies a bit further.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
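For context, the mechanism the patch leans on is the skb control buffer
(skb->cb[]): a small per-packet scratch area that is already hot in cache
on the qdisc enqueue/dequeue paths. Below is a minimal userspace sketch of
that caching pattern; the *_stub names are invented stand-ins for the
kernel structures, and only the cast mirrors the kernel's get_codel_cb()
helper.

/*
 * Hypothetical userspace stand-in for the cb[] caching pattern.
 * Not the kernel sources; structure names are illustrative only.
 */
#include <assert.h>

struct sk_buff_stub {			/* stand-in for struct sk_buff */
	unsigned int truesize;		/* may sit on a cold cache line */
	char cb[48];			/* control buffer, hot on qdisc paths */
};

struct codel_skb_cb_stub {		/* stand-in for struct codel_skb_cb */
	unsigned long enqueue_time;
	unsigned int mem_usage;		/* the field this patch adds */
};

static struct codel_skb_cb_stub *get_cb(struct sk_buff_stub *skb)
{
	/* same idea as the kernel's get_codel_cb(): reinterpret cb[] */
	return (struct codel_skb_cb_stub *)skb->cb;
}

int main(void)
{
	struct sk_buff_stub skb = { .truesize = 2048 };

	/* enqueue path: copy truesize into the hot cb[] area */
	get_cb(&skb)->mem_usage = skb.truesize;

	/* drop path: read the cached copy, not skb->truesize */
	assert(get_cb(&skb)->mem_usage == 2048);
	return 0;
}

In the patch below, the write happens once per packet in
fq_codel_enqueue(), and the drop and dequeue paths then read the cached
copy, saving one cache line miss per dropped skb.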
-rw-r--r--	include/net/codel_qdisc.h	1
-rw-r--r--	net/sched/sch_fq_codel.c	7
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
index 8144d9cd2908..098630f83a55 100644
--- a/include/net/codel_qdisc.h
+++ b/include/net/codel_qdisc.h
@@ -52,6 +52,7 @@
 /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
 struct codel_skb_cb {
 	codel_time_t	enqueue_time;
+	unsigned int	mem_usage;
 };
 
 static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index f715195459c9..a5ea0e9b6be4 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -172,7 +172,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
 	do {
 		skb = dequeue_head(flow);
 		len += qdisc_pkt_len(skb);
-		mem += skb->truesize;
+		mem += get_codel_cb(skb)->mem_usage;
 		__qdisc_drop(skb, to_free);
 	} while (++i < max_packets && len < threshold);
 
@@ -216,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	q->memory_usage += skb->truesize;
+	get_codel_cb(skb)->mem_usage = skb->truesize;
+	q->memory_usage += get_codel_cb(skb)->mem_usage;
 	memory_limited = q->memory_usage > q->memory_limit;
 	if (++sch->q.qlen <= sch->limit && !memory_limited)
 		return NET_XMIT_SUCCESS;
@@ -267,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 	if (flow->head) {
 		skb = dequeue_head(flow);
 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
-		q->memory_usage -= skb->truesize;
+		q->memory_usage -= get_codel_cb(skb)->mem_usage;
 		sch->q.qlen--;
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
 	}