author     Tejun Heo <tj@kernel.org>    2014-08-27 11:18:29 -0400
committer  Tejun Heo <tj@kernel.org>    2014-08-27 11:18:29 -0400
commit     23f66e2d661b4d3226d16e25910a9e9472ce2410 (patch)
tree       fe5b7769f72cac6ff722a0f1509df9e398768ab6 /arch/powerpc/include
parent     47405a253da4d8ca4b18ad537423083fdd790440 (diff)
Revert "powerpc: Replace __get_cpu_var uses"
This reverts commit 5828f666c069af74e00db21559f1535103c9f79a due to
build failure after merging with pending powerpc changes.
Link: http://lkml.kernel.org/g/20140827142243.6277eaff@canb.auug.org.au
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/hardirq.h   | 4
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h  | 4
-rw-r--r--  arch/powerpc/include/asm/xics.h      | 8
3 files changed, 7 insertions, 9 deletions
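
For context, the hunks below restore the older __get_cpu_var() accessor in place of the this_cpu_ptr() / __this_cpu_read() helpers that the reverted patch had introduced. A minimal sketch of the two idioms follows; it assumes kernel context with <linux/percpu.h>, and the per-cpu variable "demo_stat" and its field are hypothetical, used only for illustration (they are not part of this patch):

	#include <linux/percpu.h>

	struct demo_stat {
		unsigned int pending;
	};

	static DEFINE_PER_CPU(struct demo_stat, demo_stat);

	/* Older idiom, restored by this revert: __get_cpu_var() yields an
	 * lvalue for the calling CPU's instance of the variable. */
	static inline unsigned int demo_read_old(void)
	{
		return __get_cpu_var(demo_stat).pending;
	}

	/* Idioms from the reverted patch: __this_cpu_read() reads one
	 * member of the current CPU's instance directly, and this_cpu_ptr()
	 * replaces &__get_cpu_var() where a pointer is needed. */
	static inline unsigned int demo_read_new(void)
	{
		return __this_cpu_read(demo_stat.pending);
	}

	static inline struct demo_stat *demo_ptr_new(void)
	{
		return this_cpu_ptr(&demo_stat);
	}

Both forms operate on the calling CPU's copy of the per-cpu data; this revert only backs the powerpc conversion out because, per the commit message, it broke the build when merged with pending powerpc changes.
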
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 8d907ba4fd05..1bbb3013d6aa 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,9 +21,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()	__this_cpu_read(irq_stat.__softirq_pending)
-#define set_softirq_pending(x)	__this_cpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x)	__this_cpu_or(irq_stat.__softirq_pending, (x))
+#define local_softirq_pending()	__get_cpu_var(irq_stat).__softirq_pending
 
 static inline void ack_bad_irq(unsigned int irq)
 {
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index cd7c2719d3ef..2def01ed0cb2 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 5007ad0448ce..282d43a0c855 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -97,7 +97,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
 	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
 		return;
@@ -110,7 +110,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
 	if (WARN_ON(os_cppr->index < 1))
 		return LOWEST_PRIORITY;
@@ -120,7 +120,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
 	/* we only really want to set the priority when there's
 	 * just one cppr value on the stack
@@ -132,7 +132,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
 	return os_cppr->stack[os_cppr->index];
 }