diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-01-19 14:26:56 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-01-20 02:31:12 -0500 |
commit | cc7ec456f82da7f89a5b376e613b3ac4311b3e9a (patch) | |
tree | 534729db08c10f40c090261cdc191dd2303dfc5c /net/sched/sch_red.c | |
parent | 7180a03118cac7256fb04f929fe34d0aeee92c40 (diff) |
net_sched: cleanups
Cleanup net/sched code to current CodingStyle and practices.
Reduce inline abuse
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_red.c')
-rw-r--r-- | net/sched/sch_red.c | 61 |
1 file changed, 30 insertions, 31 deletions
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5a2c97..689157555fa4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -36,8 +36,7 @@ | |||
36 | if RED works correctly. | 36 | if RED works correctly. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | struct red_sched_data | 39 | struct red_sched_data { |
40 | { | ||
41 | u32 limit; /* HARD maximal queue length */ | 40 | u32 limit; /* HARD maximal queue length */ |
42 | unsigned char flags; | 41 | unsigned char flags; |
43 | struct red_parms parms; | 42 | struct red_parms parms; |
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q) | |||
55 | return q->flags & TC_RED_HARDDROP; | 54 | return q->flags & TC_RED_HARDDROP; |
56 | } | 55 | } |
57 | 56 | ||
58 | static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 57 | static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
59 | { | 58 | { |
60 | struct red_sched_data *q = qdisc_priv(sch); | 59 | struct red_sched_data *q = qdisc_priv(sch); |
61 | struct Qdisc *child = q->qdisc; | 60 | struct Qdisc *child = q->qdisc; |
@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
67 | red_end_of_idle_period(&q->parms); | 66 | red_end_of_idle_period(&q->parms); |
68 | 67 | ||
69 | switch (red_action(&q->parms, q->parms.qavg)) { | 68 | switch (red_action(&q->parms, q->parms.qavg)) { |
70 | case RED_DONT_MARK: | 69 | case RED_DONT_MARK: |
71 | break; | 70 | break; |
72 | 71 | ||
73 | case RED_PROB_MARK: | 72 | case RED_PROB_MARK: |
74 | sch->qstats.overlimits++; | 73 | sch->qstats.overlimits++; |
75 | if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { | 74 | if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { |
76 | q->stats.prob_drop++; | 75 | q->stats.prob_drop++; |
77 | goto congestion_drop; | 76 | goto congestion_drop; |
78 | } | 77 | } |
79 | 78 | ||
80 | q->stats.prob_mark++; | 79 | q->stats.prob_mark++; |
81 | break; | 80 | break; |
82 | 81 | ||
83 | case RED_HARD_MARK: | 82 | case RED_HARD_MARK: |
84 | sch->qstats.overlimits++; | 83 | sch->qstats.overlimits++; |
85 | if (red_use_harddrop(q) || !red_use_ecn(q) || | 84 | if (red_use_harddrop(q) || !red_use_ecn(q) || |
86 | !INET_ECN_set_ce(skb)) { | 85 | !INET_ECN_set_ce(skb)) { |
87 | q->stats.forced_drop++; | 86 | q->stats.forced_drop++; |
88 | goto congestion_drop; | 87 | goto congestion_drop; |
89 | } | 88 | } |
90 | 89 | ||
91 | q->stats.forced_mark++; | 90 | q->stats.forced_mark++; |
92 | break; | 91 | break; |
93 | } | 92 | } |
94 | 93 | ||
95 | ret = qdisc_enqueue(skb, child); | 94 | ret = qdisc_enqueue(skb, child); |
@@ -107,7 +106,7 @@ congestion_drop: | |||
107 | return NET_XMIT_CN; | 106 | return NET_XMIT_CN; |
108 | } | 107 | } |
109 | 108 | ||
110 | static struct sk_buff * red_dequeue(struct Qdisc* sch) | 109 | static struct sk_buff *red_dequeue(struct Qdisc *sch) |
111 | { | 110 | { |
112 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
113 | struct red_sched_data *q = qdisc_priv(sch); | 112 | struct red_sched_data *q = qdisc_priv(sch); |
@@ -122,7 +121,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch) | |||
122 | return skb; | 121 | return skb; |
123 | } | 122 | } |
124 | 123 | ||
125 | static struct sk_buff * red_peek(struct Qdisc* sch) | 124 | static struct sk_buff *red_peek(struct Qdisc *sch) |
126 | { | 125 | { |
127 | struct red_sched_data *q = qdisc_priv(sch); | 126 | struct red_sched_data *q = qdisc_priv(sch); |
128 | struct Qdisc *child = q->qdisc; | 127 | struct Qdisc *child = q->qdisc; |
@@ -130,7 +129,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch) | |||
130 | return child->ops->peek(child); | 129 | return child->ops->peek(child); |
131 | } | 130 | } |
132 | 131 | ||
133 | static unsigned int red_drop(struct Qdisc* sch) | 132 | static unsigned int red_drop(struct Qdisc *sch) |
134 | { | 133 | { |
135 | struct red_sched_data *q = qdisc_priv(sch); | 134 | struct red_sched_data *q = qdisc_priv(sch); |
136 | struct Qdisc *child = q->qdisc; | 135 | struct Qdisc *child = q->qdisc; |
@@ -149,7 +148,7 @@ static unsigned int red_drop(struct Qdisc* sch) | |||
149 | return 0; | 148 | return 0; |
150 | } | 149 | } |
151 | 150 | ||
152 | static void red_reset(struct Qdisc* sch) | 151 | static void red_reset(struct Qdisc *sch) |
153 | { | 152 | { |
154 | struct red_sched_data *q = qdisc_priv(sch); | 153 | struct red_sched_data *q = qdisc_priv(sch); |
155 | 154 | ||
@@ -216,7 +215,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) | |||
216 | return 0; | 215 | return 0; |
217 | } | 216 | } |
218 | 217 | ||
219 | static int red_init(struct Qdisc* sch, struct nlattr *opt) | 218 | static int red_init(struct Qdisc *sch, struct nlattr *opt) |
220 | { | 219 | { |
221 | struct red_sched_data *q = qdisc_priv(sch); | 220 | struct red_sched_data *q = qdisc_priv(sch); |
222 | 221 | ||