author		David S. Miller <davem@davemloft.net>	2008-08-04 03:51:18 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-04 16:51:40 -0400
commit		91a4231cc2efb9134373bb2a93be96a284955607 (patch)
tree		da98661d063a77992775cefb2172ff090188c36e /arch/sparc64
parent		24445a4ac9d3fdd3f96f0ad277cb2ba274470d94 (diff)
sparc64: Make smp_cross_call_masked() take a cpumask_t pointer.
Ideally this could be simplified further such that we could pass
the pointer down directly into the xcall_deliver() implementation.
But if we do that, we need to do the "cpu_online(cpu)" and
"cpu != self" checks down in those functions.
Signed-off-by: David S. Miller <davem@davemloft.net>
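The further simplification described in the message above is not part of this patch. What follows is a minimal, hypothetical sketch of what it could look like, using a made-up example_xcall_deliver() routine standing in for the real delivery implementations; the point is only that the cpu_online() and "cpu != self" filtering would have to move down into the per-mask walk:

/* Hypothetical sketch, not part of this commit: a delivery routine that
 * takes the caller's mask pointer directly and therefore has to do the
 * cpu_online() and "cpu != self" checks itself while walking the mask.
 */
static void example_xcall_deliver(u64 data0, u64 data1, u64 data2,
				  const cpumask_t *mask)
{
	int this_cpu = smp_processor_id();
	int cpu;

	for_each_cpu_mask(cpu, *mask) {
		if (!cpu_online(cpu) || cpu == this_cpu)
			continue;
		/* ...send the cross call to 'cpu' as the real
		 * implementations do...
		 */
	}
}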
Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/kernel/smp.c	18
1 file changed, 11 insertions, 7 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8c9e75dc1e65..740259d89552 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -761,15 +761,19 @@ dump_cpu_list_and_out:
 
 static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);
 
-/* Send cross call to all processors mentioned in MASK
- * except self.
+/* Send cross call to all processors mentioned in MASK_P
+ * except self. Really, there are only two cases currently,
+ * "&cpu_online_map" and "&mm->cpu_vm_mask".
  */
-static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask_p)
 {
 	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 	int this_cpu = get_cpu();
+	cpumask_t mask;
 
-	cpus_and(mask, mask, cpu_online_map);
+	mask = *mask_p;
+	if (mask_p != &cpu_online_map)
+		cpus_and(mask, mask, cpu_online_map);
 	cpu_clear(this_cpu, mask);
 
 	xcall_deliver(data0, data1, data2, &mask);
@@ -803,7 +807,7 @@ void arch_send_call_function_single_ipi(int cpu)
 
 /* Send cross call to all processors except self. */
 #define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
+	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map)
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
@@ -1056,7 +1060,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 
 	smp_cross_call_masked(&xcall_flush_tlb_mm,
 			      ctx, 0, 0,
-			      mm->cpu_vm_mask);
+			      &mm->cpu_vm_mask);
 
 local_flush_and_out:
 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1074,7 +1078,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
 				      ctx, nr, (unsigned long) vaddrs,
-				      mm->cpu_vm_mask);
+				      &mm->cpu_vm_mask);
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
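As a usage note (restating the hunks above rather than adding anything to the tree): callers now pass the address of an existing cpumask instead of a cpumask_t by value, and smp_cross_call_masked() makes its own local copy, skipping the cpus_and() with cpu_online_map when the caller literally passed &cpu_online_map, since ANDing the online map with itself would be redundant. A call site therefore looks like:

	/* Cross call only the CPUs that have run this address space. */
	smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, &mm->cpu_vm_mask);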