author		Dhaval Giani <dhaval@linux.vnet.ibm.com>	2007-10-15 11:00:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:14 -0400
commit		5cb350baf580017da38199625b7365b1763d7180 (patch)
tree		3830339798b1c6f19f1580700ea6ba240fb56ef2 /kernel/sched.c
parent		8ca0e14ffb12c257de591571a9e96102acdb1c64 (diff)
sched: group scheduling, sysfs tunables
Add tunables in sysfs to modify a user's cpu share. A directory is created
in sysfs for each new user in the system:

	/sys/kernel/uids/<uid>/cpu_share

Reading this file returns the cpu shares granted for the user. Writing into
this file modifies the cpu share for the user. Only an administrator is
allowed to modify a user's cpu share.

Ex:
	# cd /sys/kernel/uids/
	# cat 512/cpu_share
	1024
	# echo 2048 > 512/cpu_share
	# cat 512/cpu_share
	2048
	#

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
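The sysfs plumbing that exposes cpu_share lives outside this file (the
diffstat below is limited to kernel/sched.c), so it is not shown in these
hunks. As a rough sketch of how such an attribute could drive the two
accessors this patch touches; the user_struct/kobject wiring here is an
assumption, not taken from this commit:

	/*
	 * Hypothetical sketch (not from this commit): a per-uid sysfs
	 * attribute pair that reads and writes a user's share via the
	 * scheduler API. The up->tg member and the kobject embedding
	 * are assumptions.
	 */
	static ssize_t cpu_share_show(struct kobject *kobj, char *buf)
	{
		struct user_struct *up = container_of(kobj, struct user_struct, kobj);

		return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
	}

	static ssize_t cpu_share_store(struct kobject *kobj, const char *buf, size_t size)
	{
		struct user_struct *up = container_of(kobj, struct user_struct, kobj);
		unsigned long shares = simple_strtoul(buf, NULL, 0);
		int rc;

		/* sched_group_set_shares() serializes writers on tg->lock */
		rc = sched_group_set_shares(up->tg, shares);

		return rc ? rc : size;
	}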
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a3c3ec825f42..9ac99896db8f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -162,6 +162,8 @@ struct task_group {
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
+	/* spinlock to serialize modification to shares */
+	spinlock_t lock;
 };
 
 /* Default task group's sched entity on each cpu */
@@ -6533,6 +6535,7 @@ void __init sched_init(void)
 		se->parent = NULL;
 	}
 	init_task_group.shares = init_task_group_load;
+	spin_lock_init(&init_task_group.lock);
 #endif
 
 	for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -6777,6 +6780,7 @@ struct task_group *sched_create_group(void)
 	}
 
 	tg->shares = NICE_0_LOAD;
+	spin_lock_init(&tg->lock);
 
 	return tg;
 
@@ -6897,8 +6901,9 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 
+	spin_lock(&tg->lock);
 	if (tg->shares == shares)
-		return 0;
+		goto done;
 
 	/* return -EINVAL if the new value is not sane */
 
@@ -6906,7 +6911,14 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	for_each_possible_cpu(i)
 		set_se_shares(tg->se[i], shares);
 
+done:
+	spin_unlock(&tg->lock);
 	return 0;
 }
 
+unsigned long sched_group_shares(struct task_group *tg)
+{
+	return tg->shares;
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
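
Stitching the last two hunks together, sched_group_set_shares() after this
patch reads approximately as below. The assignment between the hunks
(tg->shares = shares;) sits in elided context and is an assumption here;
everything else is taken from the hunks above:

	int sched_group_set_shares(struct task_group *tg, unsigned long shares)
	{
		int i;

		spin_lock(&tg->lock);
		if (tg->shares == shares)
			goto done;		/* no change, nothing to do */

		/* return -EINVAL if the new value is not sane */

		tg->shares = shares;		/* assumed: in elided context */
		for_each_possible_cpu(i)
			set_se_shares(tg->se[i], shares);

	done:
		spin_unlock(&tg->lock);
		return 0;
	}

The new tg->lock serializes concurrent writers (e.g. several sysfs stores
racing), and the goto done path still drops the lock when the value is
unchanged.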