Diffstat (limited to 'net/sched/sch_hfsc.c')

 -rw-r--r--   net/sched/sch_hfsc.c   51
 1 file changed, 23 insertions, 28 deletions
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 47496098d35c..6488e6425652 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
  * that are expensive on 32-bit architectures.
  */
 
-struct internal_sc
-{
+struct internal_sc {
 	u64	sm1;	/* scaled slope of the 1st segment */
 	u64	ism1;	/* scaled inverse-slope of the 1st segment */
 	u64	dx;	/* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
 };
 
 /* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
 	u64	x;	/* current starting position on x-axis */
 	u64	y;	/* current starting position on y-axis */
 	u64	sm1;	/* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
 	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
 };
 
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
 	HFSC_RSC = 0x1,
 	HFSC_FSC = 0x2,
 	HFSC_USC = 0x4
 };
 
-struct hfsc_class
-{
+struct hfsc_class {
 	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
@@ -140,8 +136,8 @@ struct hfsc_class
 	u64	cl_cumul;		/* cumulative work in bytes done by
 					   real-time criteria */
 
-	u64	cl_d;			/* deadline*/
-	u64	cl_e;			/* eligible time */
+	u64	cl_d;			/* deadline*/
+	u64	cl_e;			/* eligible time */
 	u64	cl_vt;			/* virtual time */
 	u64	cl_f;			/* time when this class will fit for
 					   link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
 	unsigned long	cl_nactive;	/* number of active children */
 };
 
-struct hfsc_sched
-{
+struct hfsc_sched {
 	u16	defcls;				/* default class id */
 	struct hfsc_class root;			/* root class */
 	struct Qdisc_class_hash clhash;		/* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
 		if (go_active) {
 			n = rb_last(&cl->cl_parent->vt_tree);
 			if (n != NULL) {
-				max_cl = rb_entry(n, struct hfsc_class,vt_node);
+				max_cl = rb_entry(n, struct hfsc_class, vt_node);
 				/*
 				 * set vt to the average of the min and max
 				 * classes. if the parent's period didn't
@@ -1088,7 +1083,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->refcnt = 1;
 	cl->sched = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
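
The qdisc_create_dflt() hunks here and in hfsc_graft_class()/hfsc_init_qdisc() below track an API change in the qdisc core: the helper no longer takes the struct net_device, only the netdev_queue, since the queue already carries a pointer back to its device. The prototype below is reconstructed from the call sites in this diff rather than copied from either kernel's headers, so treat it as a sketch.

	#include <net/sch_generic.h>

	/*
	 * Reconstructed from the call sites in this diff (an assumption, not a
	 * header excerpt): the net_device argument is gone because
	 * dev_queue->dev already identifies the device.
	 */
	struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
					struct Qdisc_ops *ops, u32 parentid);
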
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			return NULL;
 		}
 #endif
-		if ((cl = (struct hfsc_class *)res.class) == NULL) {
-			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+		cl = (struct hfsc_class *)res.class;
+		if (!cl) {
+			cl = hfsc_find_class(res.classid, sch);
+			if (!cl)
 				break; /* filter selected invalid classid */
 			if (cl->level >= head->level)
 				break; /* filter may only point downwards */
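
This hunk, like the eltree_get_minel() and eltree_get_mindl() hunks further down, unpicks an assignment buried inside an if () condition, the form checkpatch warns about. A minimal standalone illustration of the same transformation, with made-up names (struct thing, find_thing()):

	struct thing;
	struct thing *find_thing(int key);

	void example(int key)
	{
		struct thing *t;

		/* old form:  if ((t = find_thing(key)) == NULL) return; */
		t = find_thing(key);
		if (!t)
			return;
		/* ... use t ... */
	}
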
@@ -1209,8 +1206,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	if (cl->level > 0)
 		return -EINVAL;
 	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					&pfifo_qdisc_ops,
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 					cl->cl_common.classid);
 		if (new == NULL)
 			new = &noop_qdisc;
@@ -1317,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
 	return -1;
 }
 
-static inline int
+static int
 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
 	if ((cl->cl_flags & HFSC_RSC) &&
@@ -1421,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 	struct hfsc_class *cl;
 	u64 next_time = 0;
 
-	if ((cl = eltree_get_minel(q)) != NULL)
+	cl = eltree_get_minel(q);
+	if (cl)
 		next_time = cl->cl_e;
 	if (q->root.cl_cfmin != 0) {
 		if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1452,8 +1449,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	q->root.cl_common.classid = sch->handle;
 	q->root.refcnt = 1;
 	q->root.sched = q;
-	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					  &pfifo_qdisc_ops,
+	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 					  sch->handle);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
@@ -1601,10 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
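
The four open-coded counter updates collapse into a single bstats_update() call on the class, and the per-qdisc counters (sch->bstats) leave the enqueue path entirely, reappearing at dequeue time as qdisc_bstats_update() in the last hunk of this diff. Inferred from the lines removed here, the helper amounts to roughly the following; the in-tree version also credits GSO segments, so treat this as a sketch rather than the kernel's exact code.

	#include <net/sch_generic.h>

	/* Sketch only: what bstats_update() boils down to for a non-GSO skb,
	 * reconstructed from the four lines this hunk deletes. */
	static inline void bstats_update_sketch(struct gnet_stats_basic_packed *bstats,
						struct sk_buff *skb)
	{
		bstats->bytes   += qdisc_pkt_len(skb);
		bstats->packets += 1;
	}
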
@@ -1630,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
 	 * find the class with the minimum deadline among
 	 * the eligible classes.
 	 */
-	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+	cl = eltree_get_mindl(q, cur_time);
+	if (cl) {
 		realtime = 1;
 	} else {
 		/*
@@ -1669,7 +1663,8 @@ hfsc_dequeue(struct Qdisc *sch)
 		set_passive(cl);
 	}
 
-	sch->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(sch);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
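
The final hunk swaps the open-coded flag manipulation for qdisc_unthrottled() and adds the qdisc-level accounting that hfsc_enqueue() no longer does. Judging from the line being replaced and from the enqueue hunk above, the two helpers amount to roughly this (a sketch, not the header definitions):

	/* Sketch: qdisc_unthrottled() does what the deleted line did. */
	static inline void qdisc_unthrottled_sketch(struct Qdisc *sch)
	{
		sch->flags &= ~TCQ_F_THROTTLED;
	}

	/* Sketch: qdisc-level byte/packet accounting, now charged at dequeue. */
	static inline void qdisc_bstats_update_sketch(struct Qdisc *sch,
						      struct sk_buff *skb)
	{
		bstats_update(&sch->bstats, skb);
	}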