author     Thomas Graf <tgraf@suug.ch>              2005-11-05 15:14:08 -0500
committer  Thomas Graf <tgr@axs.localdomain>        2005-11-05 16:02:25 -0500
commit     dba051f36a47989b20b248248ffef7984a2f6013 (patch)
tree       056d069b9be8cbf1e029f816278945a6dd03a38e /net
parent     6a1b63d467281eb6bd64aafbbf6130a1b42c8c2e (diff)
[PKT_SCHED]: RED: Cleanup and remove unnecessary code
Removes the skb trimming code which is not needed since we never touch
the skb upon failure. Removes unnecessary includes, initializers, and
simplifies the code a bit. Removes Jamal's obsolete email addresses
upon his own request.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Diffstat (limited to 'net')
-rw-r--r--  net/sched/sch_red.c | 65
1 file changed, 21 insertions, 44 deletions
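
Note: red_dump() is converted from open-coded rtattr bookkeeping to the rtnetlink nesting helpers. For readers unfamiliar with that pattern, the function after this patch reads roughly as below. This is a sketch reassembled from the hunks further down; the elided fields of struct tc_red_qopt and the described behaviour of the RTA_NEST* macros (open a nested TCA_OPTIONS attribute, close it returning skb->len, or undo the partial nest and return -1 on failure) are assumptions about the rtnetlink helpers of this kernel era, not part of the commit itself.

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit     = q->limit,
		.flags     = q->flags,
		/* ... remaining RED parameters copied from q->parms,
		 * unchanged by this patch and not shown in the hunks ... */
		.Scell_log = q->parms.Scell_log,
	};

	/* Open a nested TCA_OPTIONS attribute, put the parameter block
	 * inside it and close the nest; RTA_NEST_END returns skb->len. */
	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	/* RTA_PUT jumps here when the skb runs out of tailroom; cancelling
	 * the nest undoes any partially written attribute and returns -1,
	 * so the qdisc no longer needs its own skb_trim() error path. */
	return RTA_NEST_CANCEL(skb, opts);
}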
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 76e8df8447d9..0d89dee751a9 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -9,38 +9,19 @@
  * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
  * Changes:
- * J Hadi Salim <hadi@nortel.com> 980914: computation fixes
+ * J Hadi Salim 980914: computation fixes
  * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
- * J Hadi Salim <hadi@nortelnetworks.com> 980816: ECN support
+ * J Hadi Salim 980816: ECN support
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
 #include <linux/skbuff.h>
-#include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
-#include <net/dsfield.h>
 #include <net/red.h>
 
 
@@ -70,8 +51,7 @@ static inline int red_use_ecn(struct red_sched_data *q)
 	return q->flags & TC_RED_ECN;
 }
 
-static int
-red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
@@ -116,8 +96,7 @@ congestion_drop:
 	return NET_XMIT_CN;
 }
 
-static int
-red_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
@@ -127,8 +106,7 @@ red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	return qdisc_requeue(skb, sch);
 }
 
-static struct sk_buff *
-red_dequeue(struct Qdisc* sch)
+static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -171,14 +149,16 @@ static void red_reset(struct Qdisc* sch)
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct rtattr *tb[TCA_RED_STAB];
+	struct rtattr *tb[TCA_RED_MAX];
 	struct tc_red_qopt *ctl;
 
-	if (opt == NULL ||
-	    rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
-	    tb[TCA_RED_PARMS-1] == 0 || tb[TCA_RED_STAB-1] == 0 ||
+	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
+		return -EINVAL;
+
+	if (tb[TCA_RED_PARMS-1] == NULL ||
 	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
-	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
+	    tb[TCA_RED_STAB-1] == NULL ||
+	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
 		return -EINVAL;
 
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
@@ -193,6 +173,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 
 	if (skb_queue_empty(&sch->q))
 		red_end_of_idle_period(&q->parms);
+
 	sch_tree_unlock(sch);
 	return 0;
 }
@@ -205,8 +186,7 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	unsigned char *b = skb->tail;
-	struct rtattr *rta;
+	struct rtattr *opts = NULL;
 	struct tc_red_qopt opt = {
 		.limit = q->limit,
 		.flags = q->flags,
@@ -217,16 +197,12 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 		.Scell_log = q->parms.Scell_log,
 	};
 
-	rta = (struct rtattr*)b;
-	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+	opts = RTA_NEST(skb, TCA_OPTIONS);
 	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
-	rta->rta_len = skb->tail - b;
-
-	return skb->len;
+	return RTA_NEST_END(skb, opts);
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -1;
+	return RTA_NEST_CANCEL(skb, opts);
 }
 
 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
@@ -243,8 +219,6 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 }
 
 static struct Qdisc_ops red_qdisc_ops = {
-	.next = NULL,
-	.cl_ops = NULL,
 	.id = "red",
 	.priv_size = sizeof(struct red_sched_data),
 	.enqueue = red_enqueue,
@@ -263,10 +237,13 @@ static int __init red_module_init(void)
 {
 	return register_qdisc(&red_qdisc_ops);
 }
+
 static void __exit red_module_exit(void)
 {
 	unregister_qdisc(&red_qdisc_ops);
 }
+
 module_init(red_module_init)
 module_exit(red_module_exit)
+
 MODULE_LICENSE("GPL");
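
For reference, the attribute validation in red_change() after this patch reads roughly as below, reassembled from the hunk above. The tail of the function is elided, and RED_STAB_SIZE is assumed to be the 256-byte Stab table size from net/red.h; both points are illustrative assumptions rather than part of the diff.

static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;

	/* Parse the nested TCA_OPTIONS payload into tb[] and reject a
	 * missing or malformed option outright. */
	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	/* Both the parameter block and the Stab lookup table must be
	 * present and large enough before anything is applied. */
	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	/* ... remainder unchanged by this patch: ctl and the Stab table are
	 * applied to q->parms under the qdisc tree lock ... */
	return 0;
}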