path: root/net/sched
author    David S. Miller <davem@davemloft.net>  2012-08-22 17:21:38 -0400
committer David S. Miller <davem@davemloft.net>  2012-08-22 17:21:38 -0400
commit    1304a7343b30fc4f16045412efdbb4179a3d9255 (patch)
tree      83d667ac4f62e30f70305ce4cc7e030e3465f92e /net/sched
parent    1d76efe1577b4323609b1bcbfafa8b731eda071a (diff)
parent    23dcfa61bac244e1200ff9ad19c6e9144dcb6bb5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_ipt.c      7
-rw-r--r--  net/sched/act_mirred.c  11
-rw-r--r--  net/sched/act_pedit.c    5
-rw-r--r--  net/sched/act_simple.c   5
-rw-r--r--  net/sched/sch_qfq.c     95
5 files changed, 88 insertions, 35 deletions
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
 err2:
	kfree(tname);
 err1:
-	kfree(pc);
+	if (ret == ACT_P_CREATED) {
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
+	}
	return err;
 }
 
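All three act_* fixes in this pull share one bug shape: by the time this error label runs, the action has been allocated (via tcf_hash_create() in this era of the tree) and, when the est attribute was supplied, a rate estimator has been bound to it, so a bare kfree(pc) both leaks the estimator and frees memory that concurrent readers of the action hash may still be walking under RCU. A condensed sketch of the corrected teardown (kernel-internal code, shown out of context for illustration only); act_pedit.c and act_simple.c below receive the identical treatment on their kmalloc() and alloc_defdata() failure paths:

	/* Error-path teardown for a just-created tc action (sketch).
	 * pc comes from tcf_hash_create(); est is the optional netlink
	 * rate-estimator attribute handed in by the caller.
	 */
	if (ret == ACT_P_CREATED) {	/* never free a pre-existing action */
		if (est)		/* estimator was registered above */
			gen_kill_estimator(&pc->tcfc_bstats,
					   &pc->tcfc_rate_est);
		kfree_rcu(pc, tcfc_rcu);	/* free after RCU grace period */
	}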
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fe81cc18e9e0..9c0fd0c78814 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 out:
	if (err) {
		m->tcf_qstats.overlimits++;
-		/* should we be asking for packet to be dropped?
-		 * may make sense for redirect case only
-		 */
-		retval = TC_ACT_SHOT;
-	} else {
-		retval = m->tcf_action;
-	}
+		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+			retval = TC_ACT_SHOT;
+		else
+			retval = m->tcf_action;
+	} else
+		retval = m->tcf_action;
	spin_unlock(&m->tcf_lock);
 
	return retval;
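The mirred change settles the question the deleted comment left open: dropping on failure is right only for the redirect case. When mirroring fails, only the copy is lost and the original packet should still follow the configured verdict; when a redirect fails, there is nothing left to deliver. The new branch is equivalent to:

	/* On transmit failure: shoot the packet only if it was being
	 * redirected away; a failed mirror keeps the original verdict.
	 */
	retval = (m->tcfm_eaction != TCA_EGRESS_MIRROR) ? TC_ACT_SHOT
							: m->tcf_action;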
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
		p = to_pedit(pc);
		keys = kmalloc(ksize, GFP_KERNEL);
		if (keys == NULL) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
			return -ENOMEM;
		}
		ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
		d = to_defact(pc);
		ret = alloc_defdata(d, defdata);
		if (ret < 0) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
			return ret;
		}
		d->tcf_action = parm->action;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
	return index;
 }
 
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	skb = sch->ops->peek(sch);
+	return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+				    u32 lmax, u32 inv_w, int delta_w)
+{
+	int i;
+
+	/* update qfq-specific data */
+	cl->lmax = lmax;
+	cl->inv_w = inv_w;
+	i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+	cl->grp = &q->groups[i];
+
+	q->wsum += delta_w;
+}
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
 {
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		lmax = 1UL << QFQ_MTU_SHIFT;
 
	if (cl != NULL) {
+		bool need_reactivation = false;
+
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
				return err;
		}
 
-		if (inv_w != cl->inv_w) {
-			sch_tree_lock(sch);
-			q->wsum += delta_w;
-			cl->inv_w = inv_w;
-			sch_tree_unlock(sch);
+		if (lmax == cl->lmax && inv_w == cl->inv_w)
+			return 0; /* nothing to update */
+
+		i = qfq_calc_index(inv_w, lmax);
+		sch_tree_lock(sch);
+		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+			/*
+			 * shift cl->F back, to not charge the
+			 * class for the not-yet-served head
+			 * packet
+			 */
+			cl->F = cl->S;
+			/* remove class from its slot in the old group */
+			qfq_deactivate_class(q, cl);
+			need_reactivation = true;
		}
 
+		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+		if (need_reactivation) /* activate in new group */
+			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		sch_tree_unlock(sch);
+
		return 0;
	}
 
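The hunk above fixes what used to be a weight-only update: the group index depends on both inv_w and lmax, so changing either can move the class to a different group. For a backlogged class that move must happen under the tree lock, and cl->F is first pulled back to cl->S so the class is not charged for its still-unserved head packet; reactivation then recomputes the timestamps and slot with the new parameters. The locked sequence, condensed from the diff:

	sch_tree_lock(sch);
	if (&q->groups[qfq_calc_index(inv_w, lmax)] != cl->grp &&
	    cl->qdisc->q.qlen > 0) {
		cl->F = cl->S;			/* un-charge unserved head packet */
		qfq_deactivate_class(q, cl);	/* leave the old group's slots */
		need_reactivation = true;
	}
	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);	/* grp, wsum */
	if (need_reactivation)			/* re-enter in the new group */
		qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
	sch_tree_unlock(sch);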
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
	cl->refcnt = 1;
	cl->common.classid = classid;
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
-	cl->grp = &q->groups[i];
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
 
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			return err;
		}
	}
-	q->wsum += weight;
 
	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
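Removing the standalone q->wsum += weight closes the loop on the refactor: the weight-sum accounting now lives only in qfq_update_class_params(), which both the creation path and the change path call. A sketch of the resulting invariant, assuming (as in the unshown earlier part of qfq_change_class()) that delta_w carries the rounded new weight minus the class's previous weight:

/* wsum bookkeeping after the refactor (sketch):
 *
 *   create: qfq_update_class_params(q, cl, lmax, inv_w, delta_w)
 *           with delta_w == the new class's weight, so wsum grows once;
 *   change: delta_w == new_weight - old_weight, applied by the same
 *           helper, so wsum stays consistent across regrouping too.
 */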
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
	}
 }
 
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
-}
-
 /*
  * Updates the class, returns true if also the group needs to be updated.
  */
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
	struct qfq_class *cl;
	int err;
-	u64 roundedS;
-	int s;
 
	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
		return err;
 
	/* If reach this point, queue q was idle */
-	grp = cl->grp;
+	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+	return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int pkt_len)
+{
+	struct qfq_group *grp = cl->grp;
+	u64 roundedS;
+	int s;
+
	qfq_update_start(q, cl);
 
	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 
	/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 skip_update:
	qfq_slot_insert(grp, cl, roundedS);
-
-	return err;
 }
 
 
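Factoring qfq_activate_class() out of qfq_enqueue() is what makes the regrouping in qfq_change_class() possible: the idle-to-backlogged bookkeeping (qfq_update_start(), the finish-time charge, the slot insertion) now serves both callers. A sketch of the two entry points after this merge:

/* Idle -> backlogged entry points (sketch):
 *
 * qfq_enqueue():      class just became backlogged with this skb,
 *                     so the head length is qdisc_pkt_len(skb);
 * qfq_change_class(): a backlogged class re-enters a new group,
 *                     so the head length is qdisc_peek_len(cl->qdisc).
 *
 * Both end up charging cl->F = cl->S + (u64)pkt_len * cl->inv_w and
 * inserting the class at qfq_round_down(cl->S, grp->slot_shift).
 */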