author     Christoph Lameter <cl@linux.com>        2014-10-21 16:23:25 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>   2014-11-02 20:12:32 -0500
commit     69111bac42f5ceacdd22e30947837ceb2c4493ed
tree       717c5e3c231b078f970aeb5d895bd96f9efc8fdd /arch/powerpc/platforms/pseries
parent     0df1f2487d2f0d04703f142813d53615d62a1da4
powerpc: Replace __get_cpu_var uses
This still has not been merged and now powerpc is the only arch that does
not have this change. Sorry about missing linuxppc-dev before.

V2->V2
  - Fix up to work against 3.18-rc1

__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per-cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer
registers are used when code is generated.

At the end of the patch set all uses of __get_cpu_var have been removed, so
the macro is removed too.

The patch set includes passes over all arches as well. Once these
operations are used throughout, specialized macros can be defined in
non-x86 arches as well in order to optimize per-cpu access, e.g. by using a
global register that may be set to the per-cpu base.

Transformations done to __get_cpu_var()

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per-cpu
   variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct.

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/decrement etc. of a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	__this_cpu_inc(y);

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
[mpe: Fix build errors caused by set/or_softirq_pending(), and rework
 assignment in __set_breakpoint() to use memcpy().]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
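For readers less familiar with the percpu API, here is a minimal sketch of the access patterns the series converts to. It is illustrative only: the per-CPU variable demo_count and the function demo_update() are hypothetical and not part of this patch.

	#include <linux/percpu.h>
	#include <linux/printk.h>

	DEFINE_PER_CPU(int, demo_count);	/* hypothetical per-CPU variable */

	/*
	 * Caller is assumed to run with preemption (or interrupts) disabled,
	 * which the __this_cpu_* forms require.
	 */
	static void demo_update(void)
	{
		int *p;

		/* Address of this CPU's instance; old form was &__get_cpu_var(demo_count). */
		p = this_cpu_ptr(&demo_count);
		*p = 0;

		/* Pure value accesses use offset-based ops instead of computing an address. */
		__this_cpu_write(demo_count, 5);
		__this_cpu_inc(demo_count);
		pr_info("demo_count = %d\n", __this_cpu_read(demo_count));
	}

On x86 these ops compile down to a segment-prefixed access; as the commit message notes, other arches can add similar specializations once __get_cpu_var is gone.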
Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c           2
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c   4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c         8
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c          6
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c           4
5 files changed, 12 insertions, 12 deletions
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 1062f71f5a85..39049e4884fb 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-        struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+        struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
         struct dtl_entry *wp = dtlr->write_ptr;
         struct lppaca *vpa = local_paca->lppaca_ptr;
 
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 4575f0c9e521..f02ec3ab428c 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
         if (opcode > MAX_HCALL_OPCODE)
                 return;
 
-        h = &__get_cpu_var(hcall_stats)[opcode / 4];
+        h = this_cpu_ptr(&hcall_stats[opcode / 4]);
         h->tb_start = mftb();
         h->purr_start = mfspr(SPRN_PURR);
 }
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
         if (opcode > MAX_HCALL_OPCODE)
                 return;
 
-        h = &__get_cpu_var(hcall_stats)[opcode / 4];
+        h = this_cpu_ptr(&hcall_stats[opcode / 4]);
         h->num_calls++;
         h->tb_total += mftb() - h->tb_start;
         h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e32e00976a94..49bcf7c09c49 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 
         local_irq_save(flags);  /* to protect tcep and the page behind it */
 
-        tcep = __get_cpu_var(tce_page);
+        tcep = __this_cpu_read(tce_page);
 
         /* This is safe to do since interrupts are off when we're called
          * from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                         return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                             direction, attrs);
                 }
-                __get_cpu_var(tce_page) = tcep;
+                __this_cpu_write(tce_page, tcep);
         }
 
         rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
         long l, limit;
 
         local_irq_disable();    /* to protect tcep and the page behind it */
-        tcep = __get_cpu_var(tce_page);
+        tcep = __this_cpu_read(tce_page);
 
         if (!tcep) {
                 tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
                         local_irq_enable();
                         return -ENOMEM;
                 }
-                __get_cpu_var(tce_page) = tcep;
+                __this_cpu_write(tce_page, tcep);
         }
 
         proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8c509d5397c6..87d8792f906c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -505,7 +505,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
         unsigned long vpn;
         unsigned long i, pix, rc;
         unsigned long flags = 0;
-        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
         int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
         unsigned long param[9];
         unsigned long hash, index, shift, hidx, slot;
@@ -695,7 +695,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 
         local_irq_save(flags);
 
-        depth = &__get_cpu_var(hcall_trace_depth);
+        depth = this_cpu_ptr(&hcall_trace_depth);
 
         if (*depth)
                 goto out;
@@ -720,7 +720,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 
         local_irq_save(flags);
 
-        depth = &__get_cpu_var(hcall_trace_depth);
+        depth = this_cpu_ptr(&hcall_trace_depth);
 
         if (*depth)
                 goto out;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5a4d0fc03b03..c3b2a7e81ddb 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
         /* If it isn't an extended log we can use the per cpu 64bit buffer */
         h = (struct rtas_error_log *)&savep[1];
         if (!rtas_error_extended(h)) {
-                memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
-                errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+                memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
+                errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
         } else {
                 int len, error_log_length;
 