author    Eric Dumazet <eric.dumazet@gmail.com>  2011-01-19 14:26:56 -0500
committer David S. Miller <davem@davemloft.net>  2011-01-20 02:31:12 -0500
commit    cc7ec456f82da7f89a5b376e613b3ac4311b3e9a
tree      534729db08c10f40c090261cdc191dd2303dfc5c /net/sched/sch_hfsc.c
parent    7180a03118cac7256fb04f929fe34d0aeee92c40
net_sched: cleanups
Cleanup net/sched code to current CodingStyle and practices.

Reduce inline abuse.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
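The changes below are mechanical: opening braces of struct/enum definitions move onto the declaration line, assignments are hoisted out of if conditions, and a gratuitous inline is dropped. As a standalone illustration of the assignment pattern, here is a minimal sketch; the lookup() helper and get_old()/get_new() names are hypothetical, standing in for calls like hfsc_find_class(), and are not taken from this file:

#include <stdio.h>

/* Hypothetical lookup helper; returns NULL when the key is unknown. */
static int *lookup(int key)
{
	static int value = 42;

	return key == 0 ? &value : NULL;
}

/* Old style: the assignment is buried inside the if condition,
 * which checkpatch flags. */
static int get_old(int key)
{
	int *obj;

	if ((obj = lookup(key)) == NULL)
		return -1;
	return *obj;
}

/* New style, as this patch applies it: assign first, then test
 * the pointer directly with !obj. */
static int get_new(int key)
{
	int *obj;

	obj = lookup(key);
	if (!obj)
		return -1;
	return *obj;
}

int main(void)
{
	printf("%d %d\n", get_old(0), get_new(1));	/* prints "42 -1" */
	return 0;
}

Both forms compile to the same code; the rewrite only makes the side effect visible at a glance.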
Diffstat (limited to 'net/sched/sch_hfsc.c')
 net/sched/sch_hfsc.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791d4f6c..dea4009615f9 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
  * that are expensive on 32-bit architectures.
  */
 
-struct internal_sc
-{
+struct internal_sc {
 	u64	sm1;	/* scaled slope of the 1st segment */
 	u64	ism1;	/* scaled inverse-slope of the 1st segment */
 	u64	dx;	/* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
 };
 
 /* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
 	u64	x;	/* current starting position on x-axis */
 	u64	y;	/* current starting position on y-axis */
 	u64	sm1;	/* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
 	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
 };
 
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
 	HFSC_RSC = 0x1,
 	HFSC_FSC = 0x2,
 	HFSC_USC = 0x4
 };
 
-struct hfsc_class
-{
+struct hfsc_class {
 	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
@@ -140,8 +136,8 @@ struct hfsc_class
 	u64	cl_cumul;	/* cumulative work in bytes done by
 				   real-time criteria */
 
-	u64 	cl_d;		/* deadline*/
-	u64 	cl_e;		/* eligible time */
+	u64	cl_d;		/* deadline*/
+	u64	cl_e;		/* eligible time */
 	u64	cl_vt;		/* virtual time */
 	u64	cl_f;		/* time when this class will fit for
 				   link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
 	unsigned long	cl_nactive;	/* number of active children */
 };
 
-struct hfsc_sched
-{
+struct hfsc_sched {
 	u16	defcls;				/* default class id */
 	struct hfsc_class root;			/* root class */
 	struct Qdisc_class_hash clhash;		/* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
 	if (go_active) {
 		n = rb_last(&cl->cl_parent->vt_tree);
 		if (n != NULL) {
-			max_cl = rb_entry(n, struct hfsc_class,vt_node);
+			max_cl = rb_entry(n, struct hfsc_class, vt_node);
 			/*
 			 * set vt to the average of the min and max
 			 * classes. if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			return NULL;
 		}
 #endif
-		if ((cl = (struct hfsc_class *)res.class) == NULL) {
-			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+		cl = (struct hfsc_class *)res.class;
+		if (!cl) {
+			cl = hfsc_find_class(res.classid, sch);
+			if (!cl)
 				break; /* filter selected invalid classid */
 			if (cl->level >= head->level)
 				break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
 	return -1;
 }
 
-static inline int
+static int
 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
 	if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 	struct hfsc_class *cl;
 	u64 next_time = 0;
 
-	if ((cl = eltree_get_minel(q)) != NULL)
+	cl = eltree_get_minel(q);
+	if (cl)
 		next_time = cl->cl_e;
 	if (q->root.cl_cfmin != 0) {
 		if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch)
 		 * find the class with the minimum deadline among
 		 * the eligible classes.
 		 */
-		if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+		cl = eltree_get_mindl(q, cur_time);
+		if (cl) {
 			realtime = 1;
 		} else {
 			/*
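For reference, a minimal standalone sketch of the brace rule the struct/enum hunks above apply; point_old and point_new are made-up names, not identifiers from sch_hfsc.c:

/* Old layout, as flagged by checkpatch: opening brace on its own line. */
struct point_old
{
	int x, y;
};

/* Kernel CodingStyle: the opening brace of a struct, union, or enum
 * definition sits on the same line as the tag. */
struct point_new {
	int x, y;
};

int main(void)
{
	struct point_new p = { 1, 2 };

	return p.x + p.y;
}

The static inline int -> static int change in hfsc_dump_curves() follows the same document's caution against inline overuse: for a static function in a .c file, the compiler already inlines where it pays off, which is the "inline abuse" the commit message refers to.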