path: root/net/sched
author	David S. Miller <davem@davemloft.net>	2013-12-18 16:42:06 -0500
committer	David S. Miller <davem@davemloft.net>	2013-12-18 16:42:06 -0500
commit	143c9054949436cb05e468439dc5e46231f33d09 (patch)
tree	c2e972d8188fb1b36368e9acb5b6b59466c9d903 /net/sched
parent	0b6807034791160d5e584138943d2daea765436d (diff)
parent	35eecf052250f663f07a4cded7d3503fd1b50729 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/intel/i40e/i40e_main.c
	drivers/net/macvtap.c

Both minor merge hassles, simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_htb.c |  20
-rw-r--r--	net/sched/sch_tbf.c | 117
2 files changed, 84 insertions(+), 53 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 830c64f25539..6b0e854b0115 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1471,11 +1471,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		sch_tree_lock(sch);
 	}
 
+	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
+
 	/* it used to be a nasty bug here, we have to check that node
 	 * is really leaf before changing cl->un.leaf !
 	 */
 	if (!cl->level) {
-		cl->quantum = hopt->rate.rate / q->rate2quantum;
+		u64 quantum = cl->rate.rate_bytes_ps;
+
+		do_div(quantum, q->rate2quantum);
+		cl->quantum = min_t(u64, quantum, INT_MAX);
+
 		if (!hopt->quantum && cl->quantum < 1000) {
 			pr_warning(
 				"HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1494,13 +1505,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			cl->prio = TC_HTB_NUMPRIO - 1;
 	}
 
-	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
-
-	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
-
-	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
-	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
-
 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
 
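
A note on the quantum change above: the old code derived cl->quantum from the 32-bit hopt->rate.rate field (bytes per second in a u32), which cannot represent the rates that TCA_HTB_RATE64 was added for; the new code divides the 64-bit cl->rate.rate_bytes_ps by r2q with do_div() and clamps the result to INT_MAX. A minimal user-space sketch of that arithmetic follows; compute_quantum() and the sample numbers are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* User-space stand-in for the kernel sequence:
 *   quantum = cl->rate.rate_bytes_ps;
 *   do_div(quantum, q->rate2quantum);
 *   cl->quantum = min_t(u64, quantum, INT_MAX);
 */
static int compute_quantum(uint64_t rate_bytes_ps, unsigned int r2q)
{
	uint64_t quantum = rate_bytes_ps / r2q;  /* full 64-bit divide, no truncation */

	return quantum > INT_MAX ? INT_MAX : (int)quantum;
}

int main(void)
{
	uint64_t rate_bytes_ps = 5000000000ULL;  /* 40 Gbit/s in bytes/s; does not fit in a u32 */
	unsigned int r2q = 10;                   /* assuming the usual HTB r2q default */

	printf("quantum = %d bytes\n", compute_quantum(rate_bytes_ps, r2q));
	return 0;
}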
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a6090051c5db..887e672f9d7d 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -118,6 +118,32 @@ struct tbf_sched_data {
 };
 
 
+/* Time to Length, convert time in ns to length in bytes
+ * to determinate how many bytes can be sent in given time.
+ */
+static u64 psched_ns_t2l(const struct psched_ratecfg *r,
+			 u64 time_in_ns)
+{
+	/* The formula is :
+	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
+	 */
+	u64 len = time_in_ns * r->rate_bytes_ps;
+
+	do_div(len, NSEC_PER_SEC);
+
+	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
+		do_div(len, 53);
+		len = len * 48;
+	}
+
+	if (len > r->overhead)
+		len -= r->overhead;
+	else
+		len = 0;
+
+	return len;
+}
+
 /*
  * Return length of individual segments of a gso packet,
  * including all headers (MAC, IP, TCP/UDP)
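
The helper added above turns a time budget into a byte budget: len = time_in_ns * rate_bytes_ps / NSEC_PER_SEC, scaled by 48/53 when the link layer is ATM (48 payload bytes per 53-byte cell) and reduced by the configured per-packet overhead. Below is a self-contained user-space sketch of the same rule; the ratecfg struct and field names are simplified stand-ins for the kernel's psched_ratecfg, and the numbers are only an example.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Simplified stand-in for the fields psched_ns_t2l() reads */
struct ratecfg {
	uint64_t rate_bytes_ps;  /* bytes per second */
	int      atm;            /* nonzero if the link layer is ATM */
	uint16_t overhead;       /* per-packet overhead in bytes */
};

static uint64_t ns_to_len(const struct ratecfg *r, uint64_t time_in_ns)
{
	/* len = (time_in_ns * rate_bytes_ps) / NSEC_PER_SEC */
	uint64_t len = time_in_ns * r->rate_bytes_ps / NSEC_PER_SEC;

	if (r->atm)
		len = len / 53 * 48;  /* 48 payload bytes per 53-byte ATM cell */

	return len > r->overhead ? len - r->overhead : 0;
}

int main(void)
{
	/* 10 Mbit/s = 1,250,000 bytes/s over a 10 ms window -> 12,500 bytes */
	struct ratecfg r = { .rate_bytes_ps = 1250000, .atm = 0, .overhead = 0 };

	printf("len = %llu bytes\n",
	       (unsigned long long)ns_to_len(&r, 10 * 1000 * 1000ULL));
	return 0;
}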
@@ -289,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_TBF_MAX + 1];
 	struct tc_tbf_qopt *qopt;
-	struct qdisc_rate_table *rtab = NULL;
-	struct qdisc_rate_table *ptab = NULL;
 	struct Qdisc *child = NULL;
-	int max_size, n;
+	struct psched_ratecfg rate;
+	struct psched_ratecfg peak;
+	u64 max_size;
+	s64 buffer, mtu;
 	u64 rate64 = 0, prate64 = 0;
 
 	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
@@ -304,38 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 		goto done;
 
 	qopt = nla_data(tb[TCA_TBF_PARMS]);
-	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
-	if (rtab == NULL)
-		goto done;
-
-	if (qopt->peakrate.rate) {
-		if (qopt->peakrate.rate > qopt->rate.rate)
-			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
-		if (ptab == NULL)
-			goto done;
-	}
-
-	for (n = 0; n < 256; n++)
-		if (rtab->data[n] > qopt->buffer)
-			break;
-	max_size = (n << qopt->rate.cell_log) - 1;
-	if (ptab) {
-		int size;
-
-		for (n = 0; n < 256; n++)
-			if (ptab->data[n] > qopt->mtu)
-				break;
-		size = (n << qopt->peakrate.cell_log) - 1;
-		if (size < max_size)
-			max_size = size;
-	}
-	if (max_size < 0)
-		goto done;
+	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
+					      tb[TCA_TBF_RTAB]));
 
-	if (max_size < psched_mtu(qdisc_dev(sch)))
-		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
-				    max_size, qdisc_dev(sch)->name,
-				    psched_mtu(qdisc_dev(sch)));
+	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
+		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
+					      tb[TCA_TBF_PTAB]));
 
 	if (q->qdisc != &noop_qdisc) {
 		err = fifo_set_limit(q->qdisc, qopt->limit);
@@ -349,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 		}
 	}
 
+	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
+	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
+
+	if (tb[TCA_TBF_RATE64])
+		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
+
+	max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
+
+	if (qopt->peakrate.rate) {
+		if (tb[TCA_TBF_PRATE64])
+			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
+		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
+			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
+					    peak.rate_bytes_ps, rate.rate_bytes_ps);
+			err = -EINVAL;
+			goto done;
+		}
+
+		max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
+	}
+
+	if (max_size < psched_mtu(qdisc_dev(sch)))
+		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
+				    max_size, qdisc_dev(sch)->name,
+				    psched_mtu(qdisc_dev(sch)));
+
+	if (!max_size) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	sch_tree_lock(sch);
 	if (child) {
 		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
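
The block added above replaces the old rtab/ptab table scan: max_size is now what the configured rate can send within the buffer window, computed directly with psched_ns_t2l(), optionally capped by what the peak rate can send within the mtu window, and a zero result is rejected with -EINVAL. A rough user-space sketch of that min() logic follows; ns_to_len() is a simplified stand-in (no link-layer or overhead handling) and the sample numbers are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same time-to-length rule as psched_ns_t2l(), stripped of ATM/overhead handling */
static uint64_t ns_to_len(uint64_t rate_bytes_ps, uint64_t time_in_ns)
{
	return time_in_ns * rate_bytes_ps / NSEC_PER_SEC;
}

int main(void)
{
	uint64_t rate = 1250000;        /* 10 Mbit/s in bytes/s */
	uint64_t peak = 12500000;       /* 100 Mbit/s in bytes/s */
	uint64_t buffer_ns = 10000000;  /* burst window: 10 ms */
	uint64_t mtu_ns = 120000;       /* peak window: 1500 bytes at the peak rate */

	uint64_t max_size = ns_to_len(rate, buffer_ns);   /* 12,500 bytes */
	uint64_t peak_cap = ns_to_len(peak, mtu_ns);      /*  1,500 bytes */

	if (peak_cap < max_size)
		max_size = peak_cap;    /* min(rate * buffer, peak * mtu) */

	if (!max_size)
		printf("invalid: burst computes to 0 bytes\n");
	else
		printf("max_size = %llu bytes\n", (unsigned long long)max_size);
	return 0;
}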
@@ -362,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	q->tokens = q->buffer;
 	q->ptokens = q->mtu;
 
-	if (tb[TCA_TBF_RATE64])
-		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
-	psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64);
-	if (ptab) {
-		if (tb[TCA_TBF_PRATE64])
-			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
-		psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
+	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
+	if (qopt->peakrate.rate) {
+		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
 		q->peak_present = true;
 	} else {
 		q->peak_present = false;
@@ -377,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	sch_tree_unlock(sch);
 	err = 0;
 done:
-	if (rtab)
-		qdisc_put_rtab(rtab);
-	if (ptab)
-		qdisc_put_rtab(ptab);
 	return err;
 }
 