author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-17 20:00:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-17 20:00:20 -0500
commit     55db493b65c7b6bb5d7bd3dd3c8a2fe13f5dc09c (patch)
tree       7f9203f43e7c81687c9aaa0213266bc7b2e89e35
parent     efc8e7f4c83dc85acbf5f54a8b1b24ae75b20aaa (diff)
parent     a4636818f8e0991f32d9528f39cf4f3d6a7d30a3 (diff)
Merge branch 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  cpumask: rename tsk_cpumask to tsk_cpus_allowed
  cpumask: don't recommend set_cpus_allowed hack in Documentation/cpu-hotplug.txt
  cpumask: avoid dereferencing struct cpumask
  cpumask: convert drivers/idle/i7300_idle.c to cpumask_var_t
  cpumask: use modern cpumask style in drivers/scsi/fcoe/fcoe.c
  cpumask: avoid deprecated function in mm/slab.c
  cpumask: use cpu_online in kernel/perf_event.c
-rw-r--r--  Documentation/cpu-hotplug.txt              49
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c   2
-rw-r--r--  drivers/idle/i7300_idle.c                  15
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                    2
-rw-r--r--  include/linux/sched.h                       2
-rw-r--r--  kernel/perf_event.c                         2
-rw-r--r--  kernel/time/timer_list.c                    4
-rw-r--r--  mm/slab.c                                   2
8 files changed, 33 insertions(+), 45 deletions(-)
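
The recurring substitutions across the hunks below, old fixed-size
cpumask API on the left, modern pointer-based replacement on the right:

	cpu_set(cpu, mask)              ->  cpumask_set_cpu(cpu, mask)
	cpu_clear(cpu, mask)            ->  cpumask_clear_cpu(cpu, mask)
	cpus_weight(mask)               ->  cpumask_weight(mask)
	cpus_empty(*mask)               ->  cpumask_empty(mask)
	first_cpu(cpu_online_map)       ->  cpumask_first(cpu_online_mask)
	cpu_isset(cpu, cpu_online_map)  ->  cpu_online(cpu)
	mask->bits[0]                   ->  cpumask_bits(mask)[0]
	tsk_cpumask(tsk)                ->  tsk_cpus_allowed(tsk)
	static cpumask_t m              ->  static cpumask_var_t m
	                                    (+ zalloc_cpumask_var/free_cpumask_var)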
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 4d4a644b505e..a99d7031cdf9 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -315,41 +315,26 @@ A: The following are what is required for CPU hotplug infrastructure to work
 
 Q: I need to ensure that a particular cpu is not removed when there is some
    work specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways.  If your code can be run in interrupt context, use
+   smp_call_function_single(), otherwise use work_on_cpu().  Note that
+   work_on_cpu() is slow, and can fail due to out of memory:
 
 	int my_func_on_cpu(int cpu)
 	{
-		cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
-		int curr_cpu, err = 0;
-
-		saved_mask = current->cpus_allowed;
-		cpu_set(cpu, new_mask);
-		err = set_cpus_allowed(current, new_mask);
-
-		if (err)
-			return err;
-
-		/*
-		 * If we got scheduled out just after the return from
-		 * set_cpus_allowed() before running the work, this ensures
-		 * we stay locked.
-		 */
-		curr_cpu = get_cpu();
-
-		if (curr_cpu != cpu) {
-			err = -EAGAIN;
-			goto ret;
-		} else {
-			/*
-			 * Do work : But cant sleep, since get_cpu() disables preempt
-			 */
-		}
-	ret:
-		put_cpu();
-		set_cpus_allowed(current, saved_mask);
-		return err;
-	}
-
+		int err;
+		get_online_cpus();
+		if (!cpu_online(cpu))
+			err = -EINVAL;
+		else
+#if NEEDS_BLOCKING
+			err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+			smp_call_function_single(cpu, __my_func_on_cpu, &err,
+						 true);
+#endif
+		put_online_cpus();
+		return err;
+	}
 
 Q: How do we determine how many CPUs are available for hotplug.
 A: There is no clear spec defined way from ACPI that can give us that
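
A note on the new example above: the two helpers it recommends take
different callback signatures, which the NEEDS_BLOCKING switch glosses
over. A minimal sketch of the two shapes __my_func_on_cpu would need,
where do_blocking_work()/do_nonblocking_work() are hypothetical
placeholders rather than kernel APIs:

	/*
	 * smp_call_function_single() callback: runs in interrupt (IPI)
	 * context on the target cpu and must not sleep; the result is
	 * passed back through the info pointer.
	 */
	static void my_func_ipi(void *info)
	{
		*(int *)info = do_nonblocking_work();
	}

	/*
	 * work_on_cpu() callback: runs in a workqueue thread bound to
	 * the target cpu and may sleep; its return value becomes the
	 * return value of work_on_cpu().
	 */
	static long my_func_blocking(void *unused)
	{
		return do_blocking_work();
	}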
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index a9df9441a9a2..f125e5c551c0 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1136,7 +1136,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
 		return -ENOMEM;
 
-	cpumask_copy(oldmask, tsk_cpumask(current));
+	cpumask_copy(oldmask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
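
For context, the hunk above sits inside the affinity-juggling idiom
that powernow-k8 still uses (the same pattern the cpu-hotplug.txt
update above steers new code away from). A condensed sketch, with the
driver's details elided and the function name made up for illustration:

	static int pin_and_run_example(struct cpufreq_policy *pol)
	{
		cpumask_var_t oldmask;
		int err = 0;

		if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
			return -ENOMEM;

		/* Save the caller's affinity, then pin to the target cpu. */
		cpumask_copy(oldmask, tsk_cpus_allowed(current));
		set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));

		if (smp_processor_id() != pol->cpu)
			err = -EIO;	/* the migration did not happen */
		/* else: the frequency transition runs here, on pol->cpu */

		/* Restore the saved affinity and free the temporary mask. */
		set_cpus_allowed_ptr(current, oldmask);
		free_cpumask_var(oldmask);
		return err;
	}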
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a042a4f5..dd253002cd50 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
 static u8 i7300_idle_thrtlow_saved;
 static u32 i7300_idle_mc_saved;
 
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
 static ktime_t start_ktime;
 static unsigned long avg_idle_us;
 
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 	spin_lock_irqsave(&i7300_idle_lock, flags);
 	if (val == IDLE_START) {
 
-		cpu_set(smp_processor_id(), idle_cpumask);
+		cpumask_set_cpu(smp_processor_id(), idle_cpumask);
 
-		if (cpus_weight(idle_cpumask) != num_online_cpus())
+		if (cpumask_weight(idle_cpumask) != num_online_cpus())
 			goto end;
 
 		now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 		i7300_idle_ioat_start();
 
 	} else if (val == IDLE_END) {
-		cpu_clear(smp_processor_id(), idle_cpumask);
-		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+		cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+		if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
 			/* First CPU coming out of idle */
 			u64 idle_duration_us;
 
@@ -553,7 +553,6 @@ struct debugfs_file_info {
 static int __init i7300_idle_init(void)
 {
 	spin_lock_init(&i7300_idle_lock);
-	cpus_clear(idle_cpumask);
 	total_us = 0;
 
 	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
 	if (i7300_idle_ioat_init())
 		return -ENODEV;
 
+	if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
 	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
 	if (debugfs_dir) {
 		int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
 static void __exit i7300_idle_exit(void)
 {
 	idle_notifier_unregister(&i7300_idle_nb);
+	free_cpumask_var(idle_cpumask);
 
 	if (debugfs_dir) {
 		int i = 0;
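
Taken together, the i7300_idle hunks above follow the standard
cpumask_var_t lifecycle; condensed into one sketch (names and the
message are illustrative, not from the driver):

	#include <linux/module.h>
	#include <linux/cpumask.h>

	static cpumask_var_t example_mask;	/* acts as a pointer at use sites */

	static int __init example_init(void)
	{
		/*
		 * zalloc_cpumask_var() hands back a zeroed mask, so the
		 * explicit cpus_clear() the old code needed goes away.
		 * With CONFIG_CPUMASK_OFFSTACK=n this is a static mask
		 * and no allocation actually happens.
		 */
		if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_set_cpu(0, example_mask);
		if (cpumask_weight(example_mask) != num_online_cpus())
			pr_info("not every online cpu is marked\n");
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* kfree() under OFFSTACK, a no-op otherwise. */
		free_cpumask_var(example_mask);
	}

	module_init(example_init);
	module_exit(example_exit);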
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index e3896fcb06e3..10be9f36a4cc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1260,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1260 "CPU.\n"); 1260 "CPU.\n");
1261 1261
1262 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1262 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1263 cpu = first_cpu(cpu_online_map); 1263 cpu = cpumask_first(cpu_online_mask);
1264 fps = &per_cpu(fcoe_percpu, cpu); 1264 fps = &per_cpu(fcoe_percpu, cpu);
1265 spin_lock_bh(&fps->fcoe_rx_list.lock); 1265 spin_lock_bh(&fps->fcoe_rx_list.lock);
1266 if (!fps->thread) { 1266 if (!fps->thread) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 211ed32befbd..e89857812be6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1553,7 +1553,7 @@ struct task_struct {
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8ab86988bd24..97d1a3dd7a59 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1614,7 +1614,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	 * offline CPU and activate it when the CPU comes up, but
 	 * that's for later.
 	 */
-	if (!cpu_isset(cpu, cpu_online_map))
+	if (!cpu_online(cpu))
 		return ERR_PTR(-ENODEV);
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 28265636b6c2..bdfb8dd1050c 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -237,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-		   tick_get_broadcast_mask()->bits[0]);
+		   cpumask_bits(tick_get_broadcast_mask())[0]);
 #ifdef CONFIG_TICK_ONESHOT
 	SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-		   tick_get_broadcast_oneshot_mask()->bits[0]);
+		   cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
 #endif
 	SEQ_printf(m, "\n");
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index e17cc2c337b8..7d41f15b48d3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1132,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(*mask)) {
+		if (!cpumask_empty(mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}