author     Mauro Carvalho Chehab <mchehab@redhat.com>   2012-04-19 08:23:28 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>   2012-04-19 08:23:28 -0400
commit     d5aeee8cb28317ef608ecac421abc4d986d585d2
tree       70ec8ed8891f26e5c58152ffca9924ea1c58fe3a   /kernel/smp.c
parent     32898a145404acbebe3256709e012c2830a2043b
parent     e816b57a337ea3b755de72bec38c10c864f23015
Merge tag 'v3.4-rc3' into staging/for_v3.5
* tag 'v3.4-rc3': (3755 commits)
Linux 3.4-rc3
x86-32: fix up strncpy_from_user() sign error
ARM: 7386/1: jump_label: fixup for rename to static_key
ARM: 7384/1: ThumbEE: Disable userspace TEEHBR access for !CONFIG_ARM_THUMBEE
ARM: 7382/1: mm: truncate memory banks to fit in 4GB space for classic MMU
ARM: 7359/2: smp_twd: Only wait for reprogramming on active cpus
PCI: Fix regression in pci_restore_state(), v3
SCSI: Fix error handling when no ULD is attached
ARM: OMAP: clock: cleanup CPUfreq leftovers, fix build errors
ARM: dts: remove blank interrupt-parent properties
ARM: EXYNOS: Fix Kconfig dependencies for device tree enabled machine files
do not export kernel's NULL #define to userspace
ARM: EXYNOS: Remove broken config values for touchscren for NURI board
ARM: EXYNOS: set fix xusbxti clock for NURI and Universal210 boards
ARM: EXYNOS: fix regulator name for NURI board
ARM: SAMSUNG: make SAMSUNG_PM_DEBUG select DEBUG_LL
cpufreq: OMAP: fix build errors: depends on ARCH_OMAP2PLUS
sparc64: Eliminate obsolete __handle_softirq() function
sparc64: Fix bootup crash on sun4v.
ARM: msm: Fix section mismatches in proc_comm.c
...
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--   kernel/smp.c   90
1 file changed, 90 insertions, 0 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index db197d60489..2f8b10ecf75 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -701,3 +701,93 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
         return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
+
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+                        void *info, bool wait)
+{
+        int cpu = get_cpu();
+
+        smp_call_function_many(mask, func, info, wait);
+        if (cpumask_test_cpu(cpu, mask)) {
+                local_irq_disable();
+                func(info);
+                local_irq_enable();
+        }
+        put_cpu();
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * on_each_cpu_cond(): Call a function on each processor for which
+ * the supplied function cond_func returns true, optionally waiting
+ * for all the required CPUs to finish. This may include the local
+ * processor.
+ * @cond_func:  A callback function that is passed a cpu id and
+ *              the info parameter. The function is called
+ *              with preemption disabled. The function should
+ *              return a boolean value indicating whether to IPI
+ *              the specified CPU.
+ * @func:       The function to run on all applicable CPUs.
+ *              This must be fast and non-blocking.
+ * @info:       An arbitrary pointer to pass to both functions.
+ * @wait:       If true, wait (atomically) until function has
+ *              completed on other CPUs.
+ * @gfp_flags:  GFP flags to use when allocating the cpumask
+ *              used internally by the function.
+ *
+ * The function might sleep if the GFP flags indicate a non-atomic
+ * allocation is allowed.
+ *
+ * Preemption is disabled to protect against CPUs going offline, but not
+ * against CPUs coming online; CPUs coming online during the call will
+ * not be seen or sent an IPI.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+                        smp_call_func_t func, void *info, bool wait,
+                        gfp_t gfp_flags)
+{
+        cpumask_var_t cpus;
+        int cpu, ret;
+
+        might_sleep_if(gfp_flags & __GFP_WAIT);
+
+        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
+                preempt_disable();
+                for_each_online_cpu(cpu)
+                        if (cond_func(cpu, info))
+                                cpumask_set_cpu(cpu, cpus);
+                on_each_cpu_mask(cpus, func, info, wait);
+                preempt_enable();
+                free_cpumask_var(cpus);
+        } else {
+                /*
+                 * No free cpumask, bother. No matter, we'll
+                 * just have to IPI them one by one.
+                 */
+                preempt_disable();
+                for_each_online_cpu(cpu)
+                        if (cond_func(cpu, info)) {
+                                ret = smp_call_function_single(cpu, func,
+                                                               info, wait);
+                                WARN_ON_ONCE(ret);
+                        }
+                preempt_enable();
+        }
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
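
For readers of the patch, here is a minimal, hypothetical caller of the new on_each_cpu_mask() helper. It is not part of this commit; the module name, the per-CPU touch_count counter and the chosen CPU ids are illustrative assumptions, and it presumes a tree that already carries this change.

/* mask_demo.c - hypothetical module exercising on_each_cpu_mask() */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(unsigned long, touch_count);

/* Runs on every targeted CPU with interrupts disabled; must not sleep. */
static void touch_cpu(void *info)
{
        this_cpu_inc(touch_count);
}

static int __init mask_demo_init(void)
{
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        /* Illustrative target set: CPUs 0 and 1; only the online subset runs. */
        cpumask_set_cpu(0, mask);
        cpumask_set_cpu(1, mask);

        /* wait=true: return only after touch_cpu() has finished everywhere. */
        on_each_cpu_mask(mask, touch_cpu, NULL, true);

        free_cpumask_var(mask);
        return 0;
}

static void __exit mask_demo_exit(void)
{
}

module_init(mask_demo_init);
module_exit(mask_demo_exit);
MODULE_LICENSE("GPL");

As the kerneldoc above requires, the call is made from process context with interrupts enabled, and the callback stays fast and non-blocking because it may run from the IPI path.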
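A similar hypothetical sketch for on_each_cpu_cond(), showing the predicate/handler split; the pending_work per-CPU flag and the module scaffolding are assumptions, not part of the commit. Passing GFP_KERNEL lets the internal cpumask allocation sleep, which is why the call must come from process context.

/* cond_demo.c - hypothetical module exercising on_each_cpu_cond() */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(int, pending_work);

/* Predicate: called with preemption disabled; decides whether to IPI @cpu. */
static bool cpu_has_pending(int cpu, void *info)
{
        return per_cpu(pending_work, cpu) != 0;
}

/* IPI handler: runs on each selected CPU; fast and non-blocking. */
static void drain_pending(void *info)
{
        this_cpu_write(pending_work, 0);
}

static int __init cond_demo_init(void)
{
        /* Pretend CPU 0 has something queued so the predicate selects it. */
        per_cpu(pending_work, 0) = 1;

        /* wait=true: return once drain_pending() has run on all selected CPUs. */
        on_each_cpu_cond(cpu_has_pending, drain_pending, NULL, true, GFP_KERNEL);
        return 0;
}

static void __exit cond_demo_exit(void)
{
}

module_init(cond_demo_init);
module_exit(cond_demo_exit);
MODULE_LICENSE("GPL");

If the cpumask allocation fails, the implementation in this patch falls back to IPIing the selected CPUs one by one, so callers see the same semantics either way.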