author	Paolo Valente <paolo.valente@unimore.it>	2012-08-07 03:27:25 -0400
committer	David S. Miller <davem@davemloft.net>	2012-08-08 19:02:05 -0400
commit	be72f63b4cd5d6778cf7eb0d0a86f18f96b51c0c (patch)
tree	da43c1f094372eb1e9d1cb4d7dfa70e0ea9ba2bd /net/sched
parent	a37e6e344910a43b9ebc2bbf29a029f5ea942598 (diff)
sched: add missing group change to qfq_change_class
To speed up operations, QFQ internally divides classes into groups. Which group a class belongs to depends on the ratio between the maximum packet length and the weight of the class.

Unfortunately, the function qfq_change_class lacks the steps for changing the group of a class when the ratio max_pkt_len/weight of the class changes. For example, when the last of the following three commands is executed, the group of class 1:1 is not correctly changed:

tc qdisc add dev XXX root handle 1: qfq
tc class add dev XXX parent 1: classid 1:1 qfq weight 1
tc class change dev XXX parent 1: classid 1:1 qfq weight 4

Not changing the group of a class does not affect the long-term bandwidth guaranteed to the class, as the latter is independent of the maximum packet length and correctly changes (only) if the weight of the class changes. In contrast, if the group of the class is not updated, the class is still guaranteed the short-term bandwidth and packet delay of its old group, instead of the guarantees that it should receive according to its new weight and/or maximum packet length. This may also break service guarantees for other classes. This patch adds the missing operations.

Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
Signed-off-by: David S. Miller <davem@davemloft.net>
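To see why a weight change can move a class between groups: QFQ stores a class's weight as an inverse fixed-point factor inv_w, and the group is derived (roughly) from the logarithm of lmax * inv_w. The standalone sketch below models that mapping; the constants MIN_SLOT_SHIFT and ONE_FP and the helper calc_group_index are simplified assumptions for illustration, not the kernel's actual qfq_calc_index:

#include <stdint.h>
#include <stdio.h>

#define MIN_SLOT_SHIFT 22		/* assumed constant, illustration only */
#define ONE_FP (1U << 16)		/* assumed fixed-point "one" (weight 1) */

/* Simplified model: the group index grows roughly as
 * log2(lmax * inv_w), i.e. with the ratio max_pkt_len / weight. */
static int calc_group_index(uint32_t inv_w, uint32_t lmax)
{
	uint64_t slot_size = ((uint64_t)lmax * inv_w) >> MIN_SLOT_SHIFT;
	int index = 0;

	while (slot_size) {		/* floor(log2) + 1 */
		slot_size >>= 1;
		index++;
	}
	return index;
}

int main(void)
{
	/* a weight w is stored as inv_w = ONE_FP / w */
	printf("weight 1 -> group %d\n", calc_group_index(ONE_FP / 1, 2048));
	printf("weight 4 -> group %d\n", calc_group_index(ONE_FP / 4, 2048));
	return 0;
}

With lmax fixed at 2048 bytes, this model puts weight 1 and weight 4 in different groups; before this patch, the "tc class change" above would update inv_w and wsum but leave cl->grp pointing at the old group.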
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_qfq.c	95
1 file changed, 69 insertions(+), 26 deletions(-)
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
 	return index;
 }
 
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	skb = sch->ops->peek(sch);
+	return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+				    u32 lmax, u32 inv_w, int delta_w)
+{
+	int i;
+
+	/* update qfq-specific data */
+	cl->lmax = lmax;
+	cl->inv_w = inv_w;
+	i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+	cl->grp = &q->groups[i];
+
+	q->wsum += delta_w;
+}
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		lmax = 1UL << QFQ_MTU_SHIFT;
 
 	if (cl != NULL) {
+		bool need_reactivation = false;
+
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 				return err;
 		}
 
-		if (inv_w != cl->inv_w) {
-			sch_tree_lock(sch);
-			q->wsum += delta_w;
-			cl->inv_w = inv_w;
-			sch_tree_unlock(sch);
+		if (lmax == cl->lmax && inv_w == cl->inv_w)
+			return 0; /* nothing to update */
+
+		i = qfq_calc_index(inv_w, lmax);
+		sch_tree_lock(sch);
+		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+			/*
+			 * shift cl->F back, to not charge the
+			 * class for the not-yet-served head
+			 * packet
+			 */
+			cl->F = cl->S;
+			/* remove class from its slot in the old group */
+			qfq_deactivate_class(q, cl);
+			need_reactivation = true;
 		}
+
+		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+		if (need_reactivation) /* activate in new group */
+			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		sch_tree_unlock(sch);
+
 		return 0;
 	}
 
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	cl->refcnt = 1;
 	cl->common.classid = classid;
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
-	cl->grp = &q->groups[i];
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			return err;
 		}
 	}
-	q->wsum += weight;
 
 	sch_tree_lock(sch);
 	qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
 	}
 }
 
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
-}
-
 /*
  * Updates the class, returns true if also the group needs to be updated.
  */
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
 	struct qfq_class *cl;
 	int err;
-	u64 roundedS;
-	int s;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 
 	/* If reach this point, queue q was idle */
-	grp = cl->grp;
+	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+	return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int pkt_len)
+{
+	struct qfq_group *grp = cl->grp;
+	u64 roundedS;
+	int s;
+
 	qfq_update_start(q, cl);
 
 	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
 	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 
 	/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 skip_update:
 	qfq_slot_insert(grp, cl, roundedS);
-
-	return err;
 }
 
 
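As a closing note, the sequence the patch introduces in qfq_change_class can be read in isolation as the toy model below. All toy_* types and helpers are hypothetical stand-ins for the kernel structures (no kernel headers); the point is only the order of operations: shift F back to S so the class is not charged for its unserved head packet, leave the old group's slot, switch cl->grp, then re-insert under the new group using the head packet's length.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures; illustration only. */
struct toy_group { int index; };
struct toy_class {
	uint64_t S, F;		/* virtual start/finish timestamps */
	struct toy_group *grp;
	unsigned int qlen;	/* backlog of the class's inner qdisc */
	unsigned int head_len;	/* length of the not-yet-served head packet */
};

static void toy_deactivate(struct toy_class *cl)
{
	printf("leave slot in group %d\n", cl->grp->index);
}

static void toy_activate(struct toy_class *cl, unsigned int len)
{
	printf("insert in group %d, head packet %u bytes\n",
	       cl->grp->index, len);
}

/* Order of operations mirroring the patched qfq_change_class();
 * in the kernel all of this runs under the qdisc tree lock. */
static void toy_change_group(struct toy_class *cl, struct toy_group *new_grp)
{
	bool need_reactivation = false;

	if (new_grp != cl->grp && cl->qlen > 0) {
		cl->F = cl->S;		/* don't charge for the unserved head packet */
		toy_deactivate(cl);	/* remove from the old group's slot */
		need_reactivation = true;
	}

	cl->grp = new_grp;		/* the previously missing group change */

	if (need_reactivation)		/* re-insert under the new group */
		toy_activate(cl, cl->head_len);
}

int main(void)
{
	struct toy_group g4 = { 4 }, g6 = { 6 };
	struct toy_class cl = { .grp = &g6, .qlen = 3, .head_len = 1500 };

	toy_change_group(&cl, &g4);	/* e.g. weight 1 -> 4 moves the class */
	return 0;
}

Deactivating before updating the parameters matters because the class's slot in the old group is derived from the old timestamps and slot shift; re-inserting afterwards recomputes them against the new group.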