author		Eric Dumazet <eric.dumazet@gmail.com>	2011-03-03 14:10:02 -0500
committer	David S. Miller <davem@davemloft.net>	2011-03-03 14:10:02 -0500
commit		d276055c4e90a7278cd5167ba9755c9b214bcff7 (patch)
tree		d99d4e808f880921660dfdf73af8e97e3a6fdba9 /net/sched
parent		c53fa1ed92cd671a1dfb1e7569e9ab672612ddc6 (diff)
net_sched: reduce fifo qdisc size

Because of various alignments [SLUB / qdisc], we use 512 bytes of memory
for one {p|b}fifo qdisc, instead of 256 bytes on 64bit arches and 192
bytes on 32bit ones.

Move the "u32 limit" inside "struct Qdisc" (no impact on other qdiscs).

Change qdisc_alloc(), first trying a regular allocation before an
oversized one.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
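[Editor's note] The arithmetic behind those numbers can be sketched outside the kernel. The toy program below mimics the old and new size computations from qdisc_alloc() and rounds each request to a simplified power-of-two slab class; sizeof(struct Qdisc), the QDISC_ALIGNTO value and the slab classes used here are illustrative assumptions, not values taken from this tree.

/*
 * Illustrative sketch only: the struct size, QDISC_ALIGNTO value and the
 * power-of-two slab classes below are assumptions, not values from this tree.
 */
#include <stdio.h>

#define QDISC_ALIGNTO	32	/* assumed alignment, for illustration */
#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO - 1) & ~(QDISC_ALIGNTO - 1))

/* round a request up to a simplified power-of-two slab object size */
static unsigned int slab_object(unsigned int size)
{
	unsigned int obj = 8;

	while (obj < size)
		obj <<= 1;
	return obj;
}

int main(void)
{
	unsigned int qdisc_sz = 232;	/* hypothetical sizeof(struct Qdisc) */
	unsigned int priv_sz = 4;	/* old fifo_sched_data: a single u32 */

	/* before: aligned qdisc + private data + worst-case alignment slack */
	unsigned int before = QDISC_ALIGN(qdisc_sz) + priv_sz + (QDISC_ALIGNTO - 1);
	/* after: fifo priv_size is 0, and the slack is only requested on retry */
	unsigned int after = QDISC_ALIGN(qdisc_sz) + 0;

	printf("before: ask %u -> slab object %u bytes\n", before, slab_object(before));
	printf("after : ask %u -> slab object %u bytes\n", after, slab_object(after));
	return 0;
}

With these assumed numbers, dropping the per-qdisc private struct and the worst-case alignment slack is what lets the request fall back from the 512-byte slab object to the 256-byte one.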
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_fifo.c	34
-rw-r--r--	net/sched/sch_generic.c	18
2 files changed, 22 insertions, 30 deletions
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index be33f9ddf9dd..66effe2da8e0 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,15 +19,9 @@
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-struct fifo_sched_data {
-	u32 limit;
-};
-
 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct fifo_sched_data *q = qdisc_priv(sch);
-
-	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
+	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
@@ -35,9 +29,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct fifo_sched_data *q = qdisc_priv(sch);
-
-	if (likely(skb_queue_len(&sch->q) < q->limit))
+	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
@@ -45,9 +37,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct fifo_sched_data *q = qdisc_priv(sch);
-
-	if (likely(skb_queue_len(&sch->q) < q->limit))
+	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	/* queue full, remove one skb to fulfill the limit */
@@ -60,7 +50,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct fifo_sched_data *q = qdisc_priv(sch);
 	bool bypass;
 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
@@ -70,20 +59,20 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 		if (is_bfifo)
 			limit *= psched_mtu(qdisc_dev(sch));
 
-		q->limit = limit;
+		sch->limit = limit;
 	} else {
 		struct tc_fifo_qopt *ctl = nla_data(opt);
 
 		if (nla_len(opt) < sizeof(*ctl))
 			return -EINVAL;
 
-		q->limit = ctl->limit;
+		sch->limit = ctl->limit;
 	}
 
 	if (is_bfifo)
-		bypass = q->limit >= psched_mtu(qdisc_dev(sch));
+		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
 	else
-		bypass = q->limit >= 1;
+		bypass = sch->limit >= 1;
 
 	if (bypass)
 		sch->flags |= TCQ_F_CAN_BYPASS;
@@ -94,8 +83,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-	struct fifo_sched_data *q = qdisc_priv(sch);
-	struct tc_fifo_qopt opt = { .limit = q->limit };
+	struct tc_fifo_qopt opt = { .limit = sch->limit };
 
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
@@ -106,7 +94,7 @@ nla_put_failure:
 
 struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
 	.id		=	"pfifo",
-	.priv_size	=	sizeof(struct fifo_sched_data),
+	.priv_size	=	0,
 	.enqueue	=	pfifo_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
@@ -121,7 +109,7 @@ EXPORT_SYMBOL(pfifo_qdisc_ops);
 
 struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
 	.id		=	"bfifo",
-	.priv_size	=	sizeof(struct fifo_sched_data),
+	.priv_size	=	0,
 	.enqueue	=	bfifo_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
@@ -136,7 +124,7 @@ EXPORT_SYMBOL(bfifo_qdisc_ops);
 
 struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
 	.id		=	"pfifo_head_drop",
-	.priv_size	=	sizeof(struct fifo_sched_data),
+	.priv_size	=	0,
 	.enqueue	=	pfifo_tail_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0da09d508737..a854cab03f1e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -550,21 +550,25 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 {
 	void *p;
 	struct Qdisc *sch;
-	unsigned int size;
+	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 64-byte aligned */
-	size = QDISC_ALIGN(sizeof(*sch));
-	size += ops->priv_size + (QDISC_ALIGNTO - 1);
-
 	p = kzalloc_node(size, GFP_KERNEL,
 			 netdev_queue_numa_node_read(dev_queue));
 
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-	sch->padded = (char *) sch - (char *) p;
-
+	/* if we got non aligned memory, ask more and do alignment ourself */
+	if (sch != p) {
+		kfree(p);
+		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
+				 netdev_queue_numa_node_read(dev_queue));
+		if (!p)
+			goto errout;
+		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+		sch->padded = (char *) sch - (char *) p;
+	}
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
 	spin_lock_init(&sch->busylock);
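[Editor's note] Lifted out of the kernel, the "first try a regular allocation, only then an oversized one" strategy from the commit message reads as the sketch below; calloc() stands in for kzalloc_node(), and struct obj, obj_alloc() and ALIGNTO are placeholder names, so this is illustrative rather than the kernel API.

/*
 * Sketch of the "try a tight allocation first, oversize only if needed"
 * pattern; calloc() stands in for kzalloc_node() and struct obj/ALIGNTO
 * are placeholders, so this is illustrative rather than kernel code.
 */
#include <stdint.h>
#include <stdlib.h>

#define ALIGNTO		64
#define ALIGN_UP(x)	(((x) + ALIGNTO - 1) & ~((uintptr_t)ALIGNTO - 1))

struct obj {
	unsigned int padded;	/* offset from the raw pointer, like sch->padded */
	/* ... payload ... */
};

static struct obj *obj_alloc(size_t size)
{
	void *p = calloc(1, size);	/* first try: exact size, no slack */
	struct obj *o;

	if (!p)
		return NULL;
	o = (struct obj *)ALIGN_UP((uintptr_t)p);
	if (o != (struct obj *)p) {
		/* unlucky: free and retry with enough room to align by hand */
		free(p);
		p = calloc(1, size + ALIGNTO - 1);
		if (!p)
			return NULL;
		o = (struct obj *)ALIGN_UP((uintptr_t)p);
		o->padded = (char *)o - (char *)p;
	}
	return o;	/* padded stays 0 when the first try was already aligned */
}

int main(void)
{
	struct obj *o = obj_alloc(sizeof(*o) + 128);

	if (o)
		free((char *)o - o->padded);	/* padded recovers the raw pointer */
	return 0;
}

When the allocator already returns suitably aligned memory (the common case), the retry path is never taken and padded stays zero from the zeroing allocation, mirroring the patched qdisc_alloc().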