author	Rusty Russell <rusty@rustcorp.com.au>	2008-11-24 11:05:11 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-24 11:51:59 -0500
commit	5a16f3d30ca4e3f166d691220c003066a14e32b5 (patch)
tree	8e65fa8dc91f12a6be37c36d71bb48d2c97d6447
parent	e76bd8d9850c2296a7e8e24c9dce9b5e6b55fe2f (diff)
sched: convert struct (sys_)sched_setaffinity() to cpumask_var_t.
Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space on the stack.  cpumask_var_t is just a struct cpumask for
!CONFIG_CPUMASK_OFFSTACK.

Note the removal of the initializer of new_mask: since the first thing
we did was "cpus_and(new_mask, new_mask, cpus_allowed)" I just changed
that to "cpumask_and(new_mask, in_mask, cpus_allowed);".

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
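[Editor's note: the pattern is easiest to see out of context. Below is a
minimal, self-contained userspace sketch of how cpumask_var_t behaves under
the two configurations. It is an illustration only, not the real
<linux/cpumask.h> code; in particular, the kernel's alloc_cpumask_var() also
takes a gfp_t argument, as the hunks below show.]

/* Minimal userspace sketch of the cpumask_var_t idea; NOT the real
 * <linux/cpumask.h> code.  Compile with -DCPUMASK_OFFSTACK to mimic
 * CONFIG_CPUMASK_OFFSTACK=y.  (The kernel's alloc_cpumask_var() also
 * takes a gfp_t argument, omitted here for brevity.)
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct cpumask: really a bitmap sized by NR_CPUS. */
struct cpumask { unsigned long bits[128]; };

#ifdef CPUMASK_OFFSTACK
/* Off-stack flavour: the mask is heap-allocated, so a large NR_CPUS
 * doesn't blow up the caller's stack frame. */
typedef struct cpumask *cpumask_var_t;

static bool alloc_cpumask_var(cpumask_var_t *mask)
{
	*mask = malloc(sizeof(struct cpumask));
	return *mask != NULL;
}

static void free_cpumask_var(cpumask_var_t mask)
{
	free(mask);
}
#else
/* On-stack flavour: a one-element array, which decays to a pointer,
 * so the same pointer-style code works with zero runtime cost. */
typedef struct cpumask cpumask_var_t[1];

static bool alloc_cpumask_var(cpumask_var_t *mask)
{
	(void)mask;	/* storage already lives in the caller's frame */
	return true;
}

static void free_cpumask_var(cpumask_var_t mask)
{
	(void)mask;	/* nothing to free */
}
#endif

int main(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask))
		return 1;	/* the patch returns -ENOMEM at this point */

	memset(mask, 0, sizeof(struct cpumask));	/* use it via the pointer */
	printf("struct cpumask: %zu bytes\n", sizeof(struct cpumask));

	free_cpumask_var(mask);
	return 0;
}

Either way the caller writes the identical alloc/use/free sequence, which is
exactly the shape the hunks below give sched_setaffinity() and
sys_sched_setaffinity().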
-rw-r--r--	kernel/sched.c	40
1 file changed, 27 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f7dee2029e4d..2d4ff91e0c97 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5378,8 +5378,7 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
-	cpumask_t cpus_allowed;
-	cpumask_t new_mask = *in_mask;
+	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
 
@@ -5401,6 +5400,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
 	retval = -EPERM;
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 			!capable(CAP_SYS_NICE))
@@ -5410,24 +5417,28 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpuset_cpus_allowed(p, &cpus_allowed);
-	cpus_and(new_mask, new_mask, cpus_allowed);
+	cpuset_cpus_allowed(p, cpus_allowed);
+	cpumask_and(new_mask, in_mask, cpus_allowed);
  again:
-	retval = set_cpus_allowed_ptr(p, &new_mask);
+	retval = set_cpus_allowed_ptr(p, new_mask);
 
 	if (!retval) {
-		cpuset_cpus_allowed(p, &cpus_allowed);
-		if (!cpus_subset(new_mask, cpus_allowed)) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
 			 * update. Just reset the cpus_allowed to the
 			 * cpuset's cpus_allowed
 			 */
-			new_mask = cpus_allowed;
+			cpumask_copy(new_mask, cpus_allowed);
 			goto again;
 		}
 	}
 out_unlock:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
 	put_task_struct(p);
 	put_online_cpus();
 	return retval;
@@ -5453,14 +5464,17 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	cpumask_t new_mask;
+	cpumask_var_t new_mask;
 	int retval;
 
-	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
-	if (retval)
-		return retval;
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+		return -ENOMEM;
 
-	return sched_setaffinity(pid, &new_mask);
+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+	if (retval == 0)
+		retval = sched_setaffinity(pid, new_mask);
+	free_cpumask_var(new_mask);
+	return retval;
 }
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
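[Editor's note: a side note on the error paths added above. Each allocation
gets its own unwind label, so a failure frees exactly what was acquired
before it, and the success path falls through the same frees. The following
is a hypothetical standalone C sketch of that staged-goto pattern, with
malloc() standing in for alloc_cpumask_var() and label names mirroring the
patch.]

/* Hypothetical sketch of the staged-goto unwind used by the patch:
 * resources are released in reverse order of acquisition, failures
 * jump to the label that frees only what already exists, and the
 * success path falls through the same frees.
 */
#include <stdio.h>
#include <stdlib.h>

static int do_work(void *cpus_allowed, void *new_mask)
{
	(void)cpus_allowed;
	(void)new_mask;
	return 0;	/* pretend the affinity update succeeded */
}

static int staged_unwind(void)
{
	void *cpus_allowed, *new_mask;
	int retval;

	cpus_allowed = malloc(128);
	if (!cpus_allowed) {
		retval = -1;			/* -ENOMEM in the patch */
		goto out_put_task;		/* nothing of ours to free */
	}
	new_mask = malloc(128);
	if (!new_mask) {
		retval = -1;
		goto out_free_cpus_allowed;	/* free only the first one */
	}

	retval = do_work(cpus_allowed, new_mask);

	free(new_mask);				/* the "out_unlock" stage */
out_free_cpus_allowed:
	free(cpus_allowed);
out_put_task:
	return retval;
}

int main(void)
{
	printf("retval = %d\n", staged_unwind());
	return 0;
}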