author     Vinicius Costa Gomes <vinicius.gomes@intel.com>   2019-04-29 18:48:33 -0400
committer  David S. Miller <davem@davemloft.net>             2019-05-01 11:58:51 -0400
commit     c25031e993440debdd530278ce2171ce477df029 (patch)
tree       8c10d9c1bb132e40584b87e6a7285b972c6cf9b4 /net/sched
parent     6ca6a6654225f3cd001304d33429c817e0c0b85f (diff)
taprio: Add support for cycle-time-extension
IEEE 802.1Q-2018 defines the concept of a cycle-time-extension: the last
entry of the schedule that is running before a new schedule starts can be
extended, so that "too-short" entries can be avoided.
Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/sch_taprio.c | 35
1 file changed, 29 insertions(+), 6 deletions(-)
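
For illustration, here is a minimal userspace sketch of the decision this patch adds to
should_change_schedules(): the admin (pending) schedule takes over either when the current
cycle's close_time has reached the next base_time, or when close_time plus the operational
schedule's cycle_time_extension would reach it. This is not kernel code: plain int64_t
nanoseconds stand in for ktime_t, the next base_time is passed in directly instead of being
computed from the admin schedule, and the example values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the check in should_change_schedules():
 * switch to the pending schedule when the current cycle's close_time
 * reaches the next base_time, or when close_time plus the operational
 * schedule's cycle_time_extension reaches it.
 */
static bool should_change_schedules(int64_t next_base_time,
				    int64_t close_time,
				    int64_t cycle_time_extension)
{
	if (next_base_time <= close_time)
		return true;

	/* cycle_time_extension case: extending the last entry would
	 * carry the current cycle past the new schedule's base_time.
	 */
	if (next_base_time <= close_time + cycle_time_extension)
		return true;

	return false;
}

int main(void)
{
	/* Made-up times, in nanoseconds, for illustration only. */
	int64_t next_base_time = 1000000;	/* new schedule starts at 1 ms */
	int64_t close_time = 950000;		/* current cycle closes at 0.95 ms */

	/* Without an extension the current schedule keeps running (prints 0) ... */
	printf("no extension:    %d\n",
	       should_change_schedules(next_base_time, close_time, 0));

	/* ... with a 100 us extension the pending schedule takes over now (prints 1). */
	printf("100us extension: %d\n",
	       should_change_schedules(next_base_time, close_time, 100000));
	return 0;
}

In the kernel the same comparisons are made with ktime_compare() and ktime_add_ns(), and
only when an admin schedule is actually pending, as the patch below shows.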
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 6b37ffda23ec..539677120b9f 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -48,6 +48,7 @@ struct sched_gate_list {
 	size_t num_entries;
 	ktime_t cycle_close_time;
 	s64 cycle_time;
+	s64 cycle_time_extension;
 	s64 base_time;
 };
 
@@ -290,7 +291,7 @@ static bool should_change_schedules(const struct sched_gate_list *admin,
 				    const struct sched_gate_list *oper,
 				    ktime_t close_time)
 {
-	ktime_t next_base_time;
+	ktime_t next_base_time, extension_time;
 
 	if (!admin)
 		return false;
@@ -303,6 +304,20 @@ static bool should_change_schedules(const struct sched_gate_list *admin,
 	if (ktime_compare(next_base_time, close_time) <= 0)
 		return true;
 
+	/* This is the cycle_time_extension case, if the close_time
+	 * plus the amount that can be extended would fall after the
+	 * next schedule base_time, we can extend the current schedule
+	 * for that amount.
+	 */
+	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
+
+	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
+	 * how precisely the extension should be made. So after
+	 * conformance testing, this logic may change.
+	 */
+	if (ktime_compare(next_base_time, extension_time) <= 0)
+		return true;
+
 	return false;
 }
 
@@ -390,11 +405,12 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_PRIOMAP] = {
 		.len = sizeof(struct tc_mqprio_qopt)
 	},
 	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
 	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
+	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
@@ -496,6 +512,9 @@ static int parse_taprio_schedule(struct nlattr **tb,
 	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
 		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
 
+	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
+		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
+
 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
 		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
 
@@ -1008,6 +1027,10 @@ static int dump_schedule(struct sk_buff *msg,
 			  root->cycle_time, TCA_TAPRIO_PAD))
 		return -1;
 
+	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
+			root->cycle_time_extension, TCA_TAPRIO_PAD))
+		return -1;
+
 	entry_list = nla_nest_start_noflag(msg,
 					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
 	if (!entry_list)