author    Christoph Lameter <cl@linux.com>   2014-08-17 13:30:49 -0400
committer Tejun Heo <tj@kernel.org>          2014-08-26 13:45:53 -0400
commit    5828f666c069af74e00db21559f1535103c9f79a (patch)
tree      296715612f4c650c3e91986c9cda6f3974efcbbb /arch/powerpc/include
parent    2999a4b354c24985268f9310bc9522ff358453a8 (diff)
powerpc: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of them is address calculation via the form &__get_cpu_var(x). This calculates the address of the current processor's instance of the percpu variable based on an offset. Other use cases are storing and retrieving data from the current processor's percpu area. __get_cpu_var() can be used as an lvalue when writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always does only an address determination. However, store and retrieve operations could use a segment prefix (or a global register on other platforms) to avoid the address calculation. this_cpu_write() and this_cpu_read() can directly take an offset into a percpu area and use optimized assembly code to read and write per-cpu variables.

This patch converts __get_cpu_var into either an explicit address calculation using this_cpu_ptr() or into a use of this_cpu operations that use the offset. Thereby address calculations are avoided and fewer registers are used when code is generated.

At the end of the patch set all uses of __get_cpu_var have been removed, so the macro is removed too. The patch set includes passes over all arches as well. Once these operations are used throughout, specialized macros can be defined in non-x86 arches as well in order to optimize per-cpu access, f.e. by using a global register that may be set to the per-cpu base.

Transformations done to __get_cpu_var():

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1, but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct.

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/decrement etc. of a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	__this_cpu_inc(y);

tj: Folded a fix patch.
    http://lkml.kernel.org/g/alpine.DEB.2.11.1408172143020.9652@gentwo.org

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
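For readers who want to try the access patterns outside the kernel, here is a minimal user-space sketch, not the kernel implementation: NR_CPUS, my_cpu_id(), DEFINE_FAKE_PER_CPU(), my_cpu_ptr(), my_cpu_read() and my_cpu_write() are stand-ins invented for this illustration (loosely modelling DEFINE_PER_CPU(), this_cpu_ptr(), __this_cpu_read() and __this_cpu_write()). It only shows the distinction the log describes: computing the address of the current CPU's instance and dereferencing it, versus handing the accessor the variable and letting it perform the access directly.

	/* User-space model of the per-cpu access patterns discussed above.
	 * All names here are stand-ins for this sketch, not the kernel API. */
	#include <stdio.h>

	#define NR_CPUS 4

	static int fake_cpu;                          /* pretend "current CPU" */
	static int my_cpu_id(void) { return fake_cpu; }

	/* One slot per CPU instead of a real per-cpu memory area. */
	#define DEFINE_FAKE_PER_CPU(type, name) type name[NR_CPUS]

	/* Old style: always compute the address of this CPU's instance. */
	#define my_cpu_ptr(var)        (&(var)[my_cpu_id()])

	/* New style: read/write through an accessor, no pointer exposed. */
	#define my_cpu_read(var)       ((var)[my_cpu_id()])
	#define my_cpu_write(var, val) ((var)[my_cpu_id()] = (val))

	static DEFINE_FAKE_PER_CPU(int, counter);

	int main(void)
	{
		/* Cases 1/2 from the log: take the address of this CPU's slot. */
		int *p = my_cpu_ptr(counter);

		*p = 10;

		/* Cases 3/5: read and write without ever forming a pointer. */
		my_cpu_write(counter, my_cpu_read(counter) + 1);

		printf("cpu %d: counter = %d\n", my_cpu_id(), my_cpu_read(counter));
		return 0;
	}

In the real kernel the array lookup is replaced by a per-cpu offset (and, where available, a segment prefix or a reserved global register), which is what makes the read/write forms cheaper than building a pointer first.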
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/hardirq.h   | 4
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h  | 4
-rw-r--r--  arch/powerpc/include/asm/xics.h      | 8
3 files changed, 9 insertions, 7 deletions
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 1bbb3013d6aa..8d907ba4fd05 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,7 +21,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()	__get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending()	__this_cpu_read(irq_stat.__softirq_pending)
+#define set_softirq_pending(x)	__this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	__this_cpu_or(irq_stat.__softirq_pending, (x))
 
 static inline void ack_bad_irq(unsigned int irq)
 {
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 2def01ed0cb2..cd7c2719d3ef 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 282d43a0c855..5007ad0448ce 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -97,7 +97,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
 		return;
@@ -110,7 +110,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index < 1))
 		return LOWEST_PRIORITY;
@@ -120,7 +120,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/* we only really want to set the priority when there's
 	 * just one cppr value on the stack
@@ -132,7 +132,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	return os_cppr->stack[os_cppr->index];
 }
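The xics helpers above all follow the same shape, matching transformation #1 from the commit log: fetch a pointer to the current CPU's xics_cppr instance once via this_cpu_ptr() and then operate on its fields through that pointer. Below is a stand-alone user-space sketch of that shape only; struct fake_cppr, cppr_area, fake_cpu_ptr() and the push/pop bodies are assumptions made for illustration, not the kernel's xics implementation.

	/* User-space sketch: take the "per-cpu" pointer once per function,
	 * then work through it. All names are invented for this sketch. */
	#include <stdio.h>

	#define MAX_NUM_PRIORITIES 8
	#define LOWEST_PRIORITY    0xff

	struct fake_cppr {
		unsigned char stack[MAX_NUM_PRIORITIES];
		int index;
	};

	static struct fake_cppr cppr_area[1];   /* one "CPU" is enough here */

	/* Stand-in for this_cpu_ptr(): always the first (only) instance. */
	#define fake_cpu_ptr(var) (&(var)[0])

	static void fake_push_cppr(unsigned char vec)
	{
		struct fake_cppr *os_cppr = fake_cpu_ptr(cppr_area);

		if (os_cppr->index >= MAX_NUM_PRIORITIES - 1)
			return;
		os_cppr->stack[++os_cppr->index] = vec;
	}

	static unsigned char fake_pop_cppr(void)
	{
		struct fake_cppr *os_cppr = fake_cpu_ptr(cppr_area);

		if (os_cppr->index < 1)
			return LOWEST_PRIORITY;
		return os_cppr->stack[os_cppr->index--];
	}

	int main(void)
	{
		fake_push_cppr(5);
		printf("top of stack: %u\n", fake_pop_cppr());
		return 0;
	}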