author		Li Zefan <lizf@cn.fujitsu.com>		2008-07-30 01:33:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:44 -0400
commit		aeed682421a5ebfbf46940e30c3d1caf3bc64304
tree		684412db63c92fdee764a65d174834fbf7ef7a84
parent		93a6557558a13f9ff35213efeca483f353c39dd3
cpuset: clean up cpuset hierarchy traversal code
Use cpuset.stack_list rather than kfifo, so we avoid memory allocation
for kfifo.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Cedric Le Goater <clg@fr.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	kernel/cpuset.c	21
1 file changed, 8 insertions(+), 13 deletions(-)
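The change swaps kfifo (a heap-allocated ring buffer that must be sized and kfifo_alloc()'d up front, and whose allocation can fail) for an intrusive list: each cpuset embeds a stack_list node, and the queue head lives on the caller's stack, so the traversal needs no extra allocation at all. What follows is a minimal userspace sketch of that pattern, not the kernel code itself: the list primitives are simplified stand-ins for <linux/list.h>, and struct node with its toy two-child tree is a hypothetical stand-in for struct cpuset and its cgroup children.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the <linux/list.h> primitives the patch uses. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Recover the containing struct from its embedded list node. */
#define list_first_entry(head, type, member) \
	((type *)((char *)((head)->next) - offsetof(type, member)))

/* Hypothetical stand-in for struct cpuset with its stack_list member. */
struct node {
	const char *name;
	struct node *child[2];		/* toy fixed-degree hierarchy */
	struct list_head stack_list;	/* embedded queue linkage */
};

int main(void)
{
	struct node leaf1 = { .name = "leaf1" };
	struct node leaf2 = { .name = "leaf2" };
	struct node root  = { .name = "root", .child = { &leaf1, &leaf2 } };
	LIST_HEAD(q);	/* queue head lives on the stack: nothing to allocate */

	/* Top-down scan, mirroring the shape of the patched loop. */
	list_add_tail(&root.stack_list, &q);
	while (!list_empty(&q)) {
		struct node *cp = list_first_entry(&q, struct node, stack_list);
		int i;

		list_del(q.next);	/* pop the front entry */
		printf("visiting %s\n", cp->name);
		for (i = 0; i < 2; i++)
			if (cp->child[i])
				list_add_tail(&cp->child[i]->stack_list, &q);
	}
	return 0;
}

The trade-off of the intrusive approach is that an object can sit on only one such queue at a time, so only one traversal may be in flight; kfifo had no such constraint but paid for it with an up-front allocation and an extra failure path.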
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7a82e9033a7f..d5ab79cf516d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -54,7 +54,6 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <linux/mutex.h>
-#include <linux/kfifo.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
 
@@ -557,7 +556,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * So the reverse nesting would risk an ABBA deadlock.
  *
  * The three key local variables below are:
- * q  - a kfifo queue of cpuset pointers, used to implement a
+ * q  - a linked-list queue of cpuset pointers, used to implement a
  *	top-down scan of all cpusets. This scan loads a pointer
  *	to each cpuset marked is_sched_load_balance into the
  *	array 'csa'. For our purposes, rebuilding the schedulers
@@ -592,7 +591,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 
 void rebuild_sched_domains(void)
 {
-	struct kfifo *q;	/* queue of cpusets to be scanned */
+	LIST_HEAD(q);		/* queue of cpusets to be scanned*/
 	struct cpuset *cp;	/* scans q */
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
@@ -602,7 +601,6 @@ void rebuild_sched_domains(void)
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	q = NULL;
 	csa = NULL;
 	doms = NULL;
 	dattr = NULL;
@@ -622,20 +620,19 @@ void rebuild_sched_domains(void)
 		goto rebuild;
 	}
 
-	q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
-	if (IS_ERR(q))
-		goto done;
 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 	if (!csa)
 		goto done;
 	csn = 0;
 
-	cp = &top_cpuset;
-	__kfifo_put(q, (void *)&cp, sizeof(cp));
-	while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+	list_add(&top_cpuset.stack_list, &q);
+	while (!list_empty(&q)) {
 		struct cgroup *cont;
 		struct cpuset *child;	/* scans child cpusets of cp */
 
+		cp = list_first_entry(&q, struct cpuset, stack_list);
+		list_del(q.next);
+
 		if (cpus_empty(cp->cpus_allowed))
 			continue;
 
@@ -652,7 +649,7 @@ void rebuild_sched_domains(void)
 
 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 			child = cgroup_cs(cont);
-			__kfifo_put(q, (void *)&child, sizeof(cp));
+			list_add_tail(&child->stack_list, &q);
 		}
 	}
 
@@ -735,8 +732,6 @@ rebuild:
 	put_online_cpus();
 
 done:
-	if (q && !IS_ERR(q))
-		kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
 	/* Don't kfree(dattr) -- partition_sched_domains() does that. */