 arch/sparc64/kernel/smp.c | 33 +++++++++------------------------
 1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index ac8996ec97be..27b81775a4de 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -787,21 +787,17 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
  * except self.  Really, there are only two cases currently,
  * "&cpu_online_map" and "&mm->cpu_vm_mask".
  */
-static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask_p)
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
-	int this_cpu = get_cpu();
-	cpumask_t mask;
 
-	mask = *mask_p;
-	if (mask_p != &cpu_online_map)
-		cpus_and(mask, mask, cpu_online_map);
-	cpu_clear(this_cpu, mask);
-
-	xcall_deliver(data0, data1, data2, &mask);
-	/* NOTE: Caller runs local copy on master. */
+	xcall_deliver(data0, data1, data2, mask);
+}
 
-	put_cpu();
+/* Send cross call to all processors except self. */
+static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
+{
+	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -827,10 +823,6 @@ void arch_send_call_function_single_ipi(int cpu)
 		      &cpumask_of_cpu(cpu));
 }
 
-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map)
-
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
@@ -900,7 +892,6 @@ static inline void __local_flush_dcache_page(struct page *page)
 
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
-	cpumask_t mask = cpumask_of_cpu(cpu);
 	int this_cpu;
 
 	if (tlb_type == hypervisor)
@@ -929,7 +920,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &mask);
+			      (u64) pg_addr, &cpumask_of_cpu(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -941,7 +932,6 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
-	cpumask_t mask = cpu_online_map;
 	void *pg_addr;
 	int this_cpu;
 	u64 data0;
@@ -951,13 +941,9 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 
 	this_cpu = get_cpu();
 
-	cpu_clear(this_cpu, mask);
-
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
 #endif
-	if (cpus_empty(mask))
-		goto flush_self;
 	data0 = 0;
 	pg_addr = page_address(page);
 	if (tlb_type == spitfire) {
@@ -971,12 +957,11 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, &mask);
+			      (u64) pg_addr, &cpu_online_map);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
 	}
-flush_self:
 	__local_flush_dcache_page(page);
 
 	put_cpu();
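
Net effect: the online-CPU filtering and self-exclusion that callers used to perform by hand on a local cpumask_t copy are deleted, and every caller now hands xcall_deliver() a pointer to the intended target mask directly. The sketch below is an illustration of the mask handling that xcall_deliver() is assumed to take over, reconstructed from the lines removed in this diff; the real body of xcall_deliver() is outside these hunks, and __xcall_deliver_filtered() is a hypothetical stand-in for the tlb_type-specific (spitfire/cheetah/hypervisor) delivery path.

/* Sketch only, not code from this commit: the filtering that
 * xcall_deliver() is assumed to absorb, mirroring the logic deleted
 * from smp_cross_call_masked() and flush_dcache_page_all() above.
 */
static void xcall_deliver_sketch(u64 data0, u64 data1, u64 data2,
				 const cpumask_t *mask)
{
	int this_cpu = get_cpu();	/* disable preemption while sending */
	cpumask_t tmp;

	/* Target only CPUs that are actually online... */
	cpus_and(tmp, *mask, cpu_online_map);
	/* ...and never IPI ourselves; the caller runs the local copy
	 * on the master CPU. */
	cpu_clear(this_cpu, tmp);

	if (!cpus_empty(tmp))
		/* Hypothetical helper standing in for the per-tlb_type
		 * delivery paths. */
		__xcall_deliver_filtered(data0, data1, data2, &tmp);

	put_cpu();
}

With the filtering centralized this way, smp_cross_call() can become a plain static function instead of a macro, and callers such as smp_flush_dcache_page_impl() can pass &cpumask_of_cpu(cpu) or &cpu_online_map without first building and trimming a local copy.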
