| author | Thomas Graf <tgraf@suug.ch> | 2005-06-19 01:57:42 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2005-06-19 01:57:42 -0400 |
| commit | aaae3013d186d71a01e1059c9633c4ec8729d891 (patch) | |
| tree | 533e5512f5f8497ae293db9bd828a771e957fd8a /net/sched | |
| parent | 9972b25d0c6e7f8f893eb3444dea37b42b1201de (diff) | |
[PKT_SCHED]: Transform fifo qdisc to use generic queue management interface
The simplicity of the fifo qdisc allows several qdisc operations to be
redirected to the relevant generic queue management function directly.
This saves a lot of code and gives the pfifo a byte-based backlog.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_fifo.c | 102
1 file changed, 14 insertions(+), 88 deletions(-)
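
For orientation before the diff: the generic helpers the two fifo qdiscs switch to come from the generic queue management interface named in the subject line (introduced by the parent commit listed above, as inlines in include/net/sch_generic.h). The sketch below is reconstructed from the open-coded logic this patch removes, so the names match the diff but the bodies are an approximation rather than verbatim kernel source; in particular, the in-tree qdisc_reshape_fail() may differ in detail, e.g. in what it returns when the reshape_fail() hook accepts the packet.

```c
/* Illustration only -- these already exist in the kernel headers.
 * Kernel context assumed: struct Qdisc, struct sk_buff, <net/sch_generic.h>. */

/* Tail-enqueue and account the packet in the byte backlog and the
 * byte/packet counters, exactly what bfifo_enqueue() used to open-code. */
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;	/* == 0, as in the removed code */
}

/* Over-limit path: count the drop and, when policing is compiled in, let an
 * attached reshape_fail() hook reclaim the skb before it is freed. */
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
		kfree_skb(skb);

	return NET_XMIT_DROP;
}
```

With these two helpers, both enqueue functions shrink to a single limit check plus two calls, as the first two hunks show.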
```diff
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4888305c96da..83a4db4d3cdc 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -47,61 +47,10 @@ bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (sch->qstats.backlog + skb->len <= q->limit) {
-		__skb_queue_tail(&sch->q, skb);
-		sch->qstats.backlog += skb->len;
-		sch->bstats.bytes += skb->len;
-		sch->bstats.packets++;
-		return 0;
-	}
-	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
-	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
-		kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
-static int
-bfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.backlog += skb->len;
-	sch->qstats.requeues++;
-	return 0;
-}
-
-static struct sk_buff *
-bfifo_dequeue(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-
-	skb = __skb_dequeue(&sch->q);
-	if (skb)
-		sch->qstats.backlog -= skb->len;
-	return skb;
-}
-
-static unsigned int
-fifo_drop(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-
-	skb = __skb_dequeue_tail(&sch->q);
-	if (skb) {
-		unsigned int len = skb->len;
-		sch->qstats.backlog -= len;
-		kfree_skb(skb);
-		return len;
-	}
-	return 0;
-}
+	if (likely(sch->qstats.backlog + skb->len <= q->limit))
+		return qdisc_enqueue_tail(skb, sch);
 
-static void
-fifo_reset(struct Qdisc* sch)
-{
-	skb_queue_purge(&sch->q);
-	sch->qstats.backlog = 0;
+	return qdisc_reshape_fail(skb, sch);
 }
 
 static int
```
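
The bfifo_dequeue(), fifo_drop() and fifo_reset() bodies removed in this hunk map onto generic helpers in the same one-to-one way. Again a reconstruction from the removed code, not the literal header source:

```c
/* Illustration only; same kernel context as the previous sketch. */

/* Head-dequeue and release the packet's bytes from the backlog. */
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	if (skb)
		sch->qstats.backlog -= skb->len;

	return skb;
}

/* Drop from the tail and report how many bytes were freed up. */
static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue_tail(&sch->q);
	unsigned int len = 0;

	if (skb) {
		len = skb->len;
		sch->qstats.backlog -= len;
		kfree_skb(skb);
	}

	return len;
}

/* Purge the queue and zero the byte backlog. */
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	skb_queue_purge(&sch->q);
	sch->qstats.backlog = 0;
}
```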
```diff
@@ -109,33 +58,10 @@ pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (sch->q.qlen < q->limit) {
-		__skb_queue_tail(&sch->q, skb);
-		sch->bstats.bytes += skb->len;
-		sch->bstats.packets++;
-		return 0;
-	}
-	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
-	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
-		kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
+	if (likely(skb_queue_len(&sch->q) < q->limit))
+		return qdisc_enqueue_tail(skb, sch);
 
-static int
-pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.requeues++;
-	return 0;
-}
-
-
-static struct sk_buff *
-pfifo_dequeue(struct Qdisc* sch)
-{
-	return __skb_dequeue(&sch->q);
+	return qdisc_reshape_fail(skb, sch);
 }
 
 static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
```
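
Note that the removed pfifo_requeue() and pfifo_dequeue() never touched qstats.backlog, whereas the shared helpers do; that is what the changelog means by the pfifo gaining a byte-based backlog. A sketch of the requeue helper, modelled on the removed bfifo_requeue() (again an approximation, not the header source):

```c
/* Illustration only; same kernel context as the earlier sketches. */

/* Put a packet back at the head after a failed transmit, keeping the byte
 * backlog and the requeue counter in sync. */
static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}
```

The pfifo limit check also changes from reading sch->q.qlen directly to skb_queue_len(&sch->q), which returns the same qlen field through the standard accessor.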
```diff
@@ -180,11 +106,11 @@ struct Qdisc_ops pfifo_qdisc_ops = {
 	.id		=	"pfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	pfifo_enqueue,
-	.dequeue	=	pfifo_dequeue,
-	.requeue	=	pfifo_requeue,
-	.drop		=	fifo_drop,
+	.dequeue	=	qdisc_dequeue_head,
+	.requeue	=	qdisc_requeue,
+	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
-	.reset		=	fifo_reset,
+	.reset		=	qdisc_reset_queue,
 	.destroy	=	NULL,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,
@@ -197,11 +123,11 @@ struct Qdisc_ops bfifo_qdisc_ops = {
 	.id		=	"bfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	bfifo_enqueue,
-	.dequeue	=	bfifo_dequeue,
-	.requeue	=	bfifo_requeue,
-	.drop		=	fifo_drop,
+	.dequeue	=	qdisc_dequeue_head,
+	.requeue	=	qdisc_requeue,
+	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
-	.reset		=	fifo_reset,
+	.reset		=	qdisc_reset_queue,
 	.destroy	=	NULL,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,
```
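
After this change both ops tables share the same generic dequeue, requeue, drop and reset callbacks and differ only in their enqueue function, i.e. in what "limit" means. A small stand-alone illustration of that difference (ordinary user-space C with a made-up struct, names and numbers, not kernel code):

```c
#include <stdio.h>

/* Toy model of the two admission checks left in sch_fifo.c: bfifo limits
 * queued bytes, pfifo limits queued packets (while both now track bytes). */
struct toy_fifo {
	unsigned int limit;	/* bytes for bfifo, packets for pfifo */
	unsigned int backlog;	/* queued bytes */
	unsigned int qlen;	/* queued packets */
};

static int bfifo_admit(struct toy_fifo *q, unsigned int len)
{
	if (q->backlog + len <= q->limit) {	/* byte-based limit */
		q->backlog += len;
		q->qlen++;
		return 1;
	}
	return 0;	/* would go to qdisc_reshape_fail() */
}

static int pfifo_admit(struct toy_fifo *q, unsigned int len)
{
	if (q->qlen < q->limit) {		/* packet-count limit */
		q->backlog += len;		/* byte backlog still tracked */
		q->qlen++;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_fifo bfifo = { .limit = 3000 };	/* 3000 bytes */
	struct toy_fifo pfifo = { .limit = 2 };		/* 2 packets  */
	unsigned int sizes[] = { 1500, 1500, 1500 };

	for (int i = 0; i < 3; i++)
		printf("bfifo packet %d (%u bytes): %s\n", i, sizes[i],
		       bfifo_admit(&bfifo, sizes[i]) ? "queued" : "over limit");

	for (int i = 0; i < 3; i++)
		printf("pfifo packet %d (%u bytes): %s\n", i, sizes[i],
		       pfifo_admit(&pfifo, sizes[i]) ? "queued" : "over limit");

	return 0;
}
```

Both toy queues refuse the third 1500-byte packet, but for different reasons: bfifo because 4500 bytes would exceed its 3000-byte limit, pfifo because a third packet would exceed its 2-packet limit.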