path: root/kernel
author     Ingo Molnar <mingo@elte.hu>  2009-01-15 09:45:31 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-01-15 09:45:31 -0500
commit     49a93bc978b4b3d564f6b330179b4cc2724a031d (patch)
tree       8b14b6c1d32b2a64bd9e7d0793717780ecc1d985 /kernel
parent     54da5b3d44238eeb7417bacf792fb416d473bf4d (diff)
parent     a6525042bfdfcab128bd91fad264de10fd24a55e (diff)
Merge branch 'linus' into cpus4096
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |   6
-rw-r--r--  kernel/async.c        |  21
-rw-r--r--  kernel/rcutorture.c   | 113
-rw-r--r--  kernel/sched.c        |  13
-rw-r--r--  kernel/sched_debug.c  |  21
-rw-r--r--  kernel/up.c           |  21
6 files changed, 135 insertions, 60 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 2921d90ce32f..2aebc4cd7878 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
+obj-y += smp.o
+else
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/async.c b/kernel/async.c
index f286e9f2b736..608b32b42812 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -90,12 +90,12 @@ extern int initcall_debug;
 static async_cookie_t __lowest_in_progress(struct list_head *running)
 {
 	struct async_entry *entry;
-	if (!list_empty(&async_pending)) {
-		entry = list_first_entry(&async_pending,
+	if (!list_empty(running)) {
+		entry = list_first_entry(running,
 			struct async_entry, list);
 		return entry->cookie;
-	} else if (!list_empty(running)) {
-		entry = list_first_entry(running,
+	} else if (!list_empty(&async_pending)) {
+		entry = list_first_entry(&async_pending,
 			struct async_entry, list);
 		return entry->cookie;
 	} else {
@@ -104,6 +104,17 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
 	}
 
 }
+
+static async_cookie_t lowest_in_progress(struct list_head *running)
+{
+	unsigned long flags;
+	async_cookie_t ret;
+
+	spin_lock_irqsave(&async_lock, flags);
+	ret = __lowest_in_progress(running);
+	spin_unlock_irqrestore(&async_lock, flags);
+	return ret;
+}
 /*
  * pick the first pending entry and run it
  */
@@ -229,7 +240,7 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
 		starttime = ktime_get();
 	}
 
-	wait_event(async_done, __lowest_in_progress(running) >= cookie);
+	wait_event(async_done, lowest_in_progress(running) >= cookie);
 
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		endtime = ktime_get();
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1cff28db56b6..7c4142a79f0a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,29 +136,47 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
-static int fullstop;		/* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
-				/* spawning of kthreads. */
+/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
+				/* of kthreads. */
 
 /*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
  */
 static int
 rcutorture_shutdown_notify(struct notifier_block *unused1,
 			   unsigned long unused2, void *unused3)
 {
-	if (fullstop)
-		return NOTIFY_DONE;
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop)
+	if (fullstop == FULLSTOP_DONTSTOP)
 		fullstop = FULLSTOP_SHUTDOWN;
+	else
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 	mutex_unlock(&fullstop_mutex);
 	return NOTIFY_DONE;
 }
 
 /*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+		printk(KERN_NOTICE
+		       "rcutorture thread %s parking due to system shutdown\n",
+		       title);
+		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+	}
+}
+
+/*
  * Allocate an element from the rcu_tortures pool.
  */
 static struct rcu_torture *
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
 }
 
 static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
 {
-	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+	while (stutter_pause_test || !rcutorture_runnable) {
 		if (rcutorture_runnable)
 			schedule_timeout_interruptible(1);
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+		rcutorture_shutdown_absorb(title);
 	}
 }
 
@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
 	int i;
 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-	if (fullstop) {
+	if (fullstop != FULLSTOP_DONTSTOP) {
 		/* Test is ending, just drop callbacks on the floor. */
 		/* The next initialization will pick up the pieces. */
 		return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
 		}
 		rcu_torture_current_version++;
 		oldbatch = cur_ops->completed();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_writer");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_writer");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
 		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
 		udelay(rcu_random(&rand) & 0x3ff);
 		cur_ops->sync();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_fakewriter");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_reader");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+	rcutorture_shutdown_absorb("rcu_torture_reader");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
 	do {
 		schedule_timeout_interruptible(stat_interval * HZ);
 		rcu_torture_stats_print();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stats");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
 	return 0;
 }
@@ -866,52 +889,49 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_var_t tmp_mask;
+	cpumask_t tmp_mask;
 	int i;
 
-	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-		BUG();
-
-	cpumask_setall(tmp_mask);
+	cpus_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
-	if (num_online_cpus() == 1)
-		goto out;
+	if (num_online_cpus() == 1) {
+		put_online_cpus();
+		return;
+	}
 
 	if (rcu_idle_cpu != -1)
-		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+		cpu_clear(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed_ptr(current, tmp_mask);
+	set_cpus_allowed_ptr(current, &tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, tmp_mask);
+		set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, tmp_mask);
+		set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
 	else
 		rcu_idle_cpu--;
 
-out:
 	put_online_cpus();
-	free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
 	do {
 		schedule_timeout_interruptible(shuffle_interval * HZ);
 		rcu_torture_shuffle_tasks();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_shuffle");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
 	return 0;
 }
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
 	do {
 		schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 1;
-		if (!kthread_should_stop() && !fullstop)
+		if (!kthread_should_stop())
 			schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 0;
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stutter");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
 	return 0;
 }
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
 	int i;
 
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop) {
-		/* If being signaled, let it happen, then exit. */
+	if (fullstop == FULLSTOP_SHUTDOWN) {
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
-		schedule_timeout_interruptible(10 * HZ);
+		schedule_timeout_uninterruptible(10);
 		if (cur_ops->cb_barrier != NULL)
 			cur_ops->cb_barrier();
 		return;
 	}
-	fullstop = FULLSTOP_CLEANUP;
+	fullstop = FULLSTOP_RMMOD;
 	mutex_unlock(&fullstop_mutex);
 	unregister_reboot_notifier(&rcutorture_nb);
 	if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
 	else
 		nrealreaders = 2 * num_online_cpus();
 	rcu_torture_print_module_parms("Start of test");
-	fullstop = 0;
+	fullstop = FULLSTOP_DONTSTOP;
 
 	/* Set up the freelist. */
 
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f3..8be2c13b50d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 	if (cpumask_weight(cpu_map) >
 			SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-		sd = &per_cpu(allnodes_domains, i);
+		sd = &per_cpu(allnodes_domains, i).sd;
 		SD_INIT(sd, ALLNODES);
 		set_domain_attribute(sd, attr);
 		cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4293cfa9681d..16eeba4e4169 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = cfs_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = rt_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
diff --git a/kernel/up.c b/kernel/up.c
new file mode 100644
index 000000000000..1ff27a28bb7d
--- /dev/null
+++ b/kernel/up.c
@@ -0,0 +1,21 @@
+/*
+ * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int wait)
+{
+	WARN_ON(cpu != 0);
+
+	local_irq_disable();
+	(func)(info);
+	local_irq_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);