Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	141
1 file changed, 111 insertions(+), 30 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cecaea67ae9b..85a5fbff2b00 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,7 +176,7 @@ struct task_group {
 	struct sched_rt_entity **rt_se;
 	struct rt_rq **rt_rq;
 
-	unsigned int rt_ratio;
+	u64 rt_runtime;
 
 	/*
 	 * shares assigned to a task group governs how much of cpu bandwidth
@@ -642,19 +642,21 @@ const_debug unsigned int sysctl_sched_features =
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
- * period over which we measure -rt task cpu usage in ms.
+ * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
-const_debug unsigned int sysctl_sched_rt_period = 1000;
+unsigned int sysctl_sched_rt_period = 1000000;
 
-#define SCHED_RT_FRAC_SHIFT	16
-#define SCHED_RT_FRAC		(1UL << SCHED_RT_FRAC_SHIFT)
+/*
+ * part of the period that we allow rt tasks to run in us.
+ * default: 0.95s
+ */
+int sysctl_sched_rt_runtime = 950000;
 
 /*
- * ratio of time -rt tasks may consume.
- * default: 95%
+ * single value that denotes runtime == period, ie unlimited time.
  */
-const_debug unsigned int sysctl_sched_rt_ratio = 62259;
+#define RUNTIME_INF	((u64)~0ULL)
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
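
Annotation (not part of the patch): the removed sysctl_sched_rt_ratio default of 62259 is simply 95% expressed in 16-bit fixed point (0.95 * 65536 ≈ 62259). The new pair states the same cap directly in microseconds: 950000 us of -rt runtime per 1000000 us period. Because sysctl_sched_rt_runtime is now a signed int, -1 can stand for "no limit", which the admission code below maps to RUNTIME_INF.
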
@@ -7187,7 +7189,8 @@ void __init sched_init(void)
 				&per_cpu(init_cfs_rq, i),
 				&per_cpu(init_sched_entity, i), i, 1);
 
-		init_task_group.rt_ratio = sysctl_sched_rt_ratio; /* XXX */
+		init_task_group.rt_runtime =
+			sysctl_sched_rt_runtime * NSEC_PER_USEC;
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
 		init_tg_rt_entry(rq, &init_task_group,
 				&per_cpu(init_rt_rq, i),
@@ -7583,7 +7586,7 @@ struct task_group *sched_create_group(void)
 		goto err;
 
 	tg->shares = NICE_0_LOAD;
-	tg->rt_ratio = 0; /* XXX */
+	tg->rt_runtime = 0;
 
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
@@ -7785,30 +7788,76 @@ unsigned long sched_group_shares(struct task_group *tg)
 }
 
 /*
- * Ensure the total rt_ratio <= sysctl_sched_rt_ratio
+ * Ensure that the real time constraints are schedulable.
  */
-int sched_group_set_rt_ratio(struct task_group *tg, unsigned long rt_ratio)
+static DEFINE_MUTEX(rt_constraints_mutex);
+
+static unsigned long to_ratio(u64 period, u64 runtime)
+{
+	if (runtime == RUNTIME_INF)
+		return 1ULL << 16;
+
+	runtime *= (1ULL << 16);
+	div64_64(runtime, period);
+	return runtime;
+}
+
+static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
 	struct task_group *tgi;
 	unsigned long total = 0;
+	unsigned long global_ratio =
+		to_ratio(sysctl_sched_rt_period,
+			 sysctl_sched_rt_runtime < 0 ?
+				RUNTIME_INF : sysctl_sched_rt_runtime);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(tgi, &task_groups, list)
-		total += tgi->rt_ratio;
-	rcu_read_unlock();
+	list_for_each_entry_rcu(tgi, &task_groups, list) {
+		if (tgi == tg)
+			continue;
 
-	if (total + rt_ratio - tg->rt_ratio > sysctl_sched_rt_ratio)
-		return -EINVAL;
+		total += to_ratio(period, tgi->rt_runtime);
+	}
+	rcu_read_unlock();
 
-	tg->rt_ratio = rt_ratio;
-	return 0;
+	return total + to_ratio(period, runtime) < global_ratio;
 }
 
-unsigned long sched_group_rt_ratio(struct task_group *tg)
+int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
-	return tg->rt_ratio;
+	u64 rt_runtime, rt_period;
+	int err = 0;
+
+	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+	if (rt_runtime_us == -1)
+		rt_runtime = rt_period;
+
+	mutex_lock(&rt_constraints_mutex);
+	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
+		err = -EINVAL;
+		goto unlock;
+	}
+	if (rt_runtime_us == -1)
+		rt_runtime = RUNTIME_INF;
+	tg->rt_runtime = rt_runtime;
+ unlock:
+	mutex_unlock(&rt_constraints_mutex);
+
+	return err;
 }
 
+long sched_group_rt_runtime(struct task_group *tg)
+{
+	u64 rt_runtime_us;
+
+	if (tg->rt_runtime == RUNTIME_INF)
+		return -1;
+
+	rt_runtime_us = tg->rt_runtime;
+	do_div(rt_runtime_us, NSEC_PER_USEC);
+	return rt_runtime_us;
+}
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_FAIR_CGROUP_SCHED
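
Annotation (not part of the patch): the admission test above keeps the old 16-bit fixed-point arithmetic. to_ratio() scales runtime/period by 2^16, and __rt_schedulable() accepts a new setting only while the sum of every group's ratio stays below the global allowance derived from the two sysctls. A minimal userspace sketch of that arithmetic, with plain 64-bit division standing in for the kernel's div64_64():

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-in for to_ratio(): runtime/period in 16-bit fixed point */
static unsigned long to_ratio(uint64_t period, uint64_t runtime)
{
	return (unsigned long)((runtime << 16) / period);
}

int main(void)
{
	uint64_t period = 1000000;	/* default period, in us */

	/* 950000 * 65536 / 1000000 == 62259: the global allowance, and exactly
	 * the magic number the old sysctl_sched_rt_ratio default encoded */
	printf("global ratio = %lu\n", to_ratio(period, 950000));

	/* a group asking for 300ms per 1s period uses 19660/65536 of the cpu */
	printf("group ratio  = %lu\n", to_ratio(period, 300000));
	return 0;
}
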
@@ -7884,17 +7933,49 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 	return (u64) tg->shares;
 }
 
-static int cpu_rt_ratio_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-		u64 rt_ratio_val)
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+				struct file *file,
+				const char __user *userbuf,
+				size_t nbytes, loff_t *unused_ppos)
 {
-	return sched_group_set_rt_ratio(cgroup_tg(cgrp), rt_ratio_val);
+	char buffer[64];
+	int retval = 0;
+	s64 val;
+	char *end;
+
+	if (!nbytes)
+		return -EINVAL;
+	if (nbytes >= sizeof(buffer))
+		return -E2BIG;
+	if (copy_from_user(buffer, userbuf, nbytes))
+		return -EFAULT;
+
+	buffer[nbytes] = 0;	/* nul-terminate */
+
+	/* strip newline if necessary */
+	if (nbytes && (buffer[nbytes-1] == '\n'))
+		buffer[nbytes-1] = 0;
+	val = simple_strtoll(buffer, &end, 0);
+	if (*end)
+		return -EINVAL;
+
+	/* Pass to subsystem */
+	retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+	if (!retval)
+		retval = nbytes;
+	return retval;
 }
 
-static u64 cpu_rt_ratio_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
+				struct file *file,
+				char __user *buf, size_t nbytes,
+				loff_t *ppos)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	char tmp[64];
+	long val = sched_group_rt_runtime(cgroup_tg(cgrp));
+	int len = sprintf(tmp, "%ld\n", val);
 
-	return (u64) tg->rt_ratio;
+	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
 static struct cftype cpu_files[] = {
@@ -7904,9 +7985,9 @@ static struct cftype cpu_files[] = {
 		.write_uint = cpu_shares_write_uint,
 	},
 	{
-		.name = "rt_ratio",
-		.read_uint = cpu_rt_ratio_read_uint,
-		.write_uint = cpu_rt_ratio_write_uint,
+		.name = "rt_runtime_us",
+		.read = cpu_rt_runtime_read,
+		.write = cpu_rt_runtime_write,
 	},
 };
 
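
Annotation (not part of the patch): a sketch of how the new per-group file might be exercised from userspace. The mount point /dev/cgroup and the group name rtgroup are assumptions for illustration; the file name combines the cpu subsystem prefix with the "rt_runtime_us" cftype registered above.

#include <stdio.h>

/* assumed layout: cpu controller mounted at /dev/cgroup, child group "rtgroup" */
#define RT_FILE "/dev/cgroup/rtgroup/cpu.rt_runtime_us"

int main(void)
{
	FILE *f;
	long runtime_us;

	/* request 300ms of -rt runtime per period; the kernel returns -EINVAL
	 * from sched_group_set_rt_runtime() if __rt_schedulable() rejects it */
	f = fopen(RT_FILE, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", 300000);
	fclose(f);

	/* read it back; -1 would mean unlimited (RUNTIME_INF) */
	f = fopen(RT_FILE, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%ld", &runtime_us) == 1)
		printf("cpu.rt_runtime_us = %ld\n", runtime_us);
	fclose(f);
	return 0;
}
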