Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/lppaca.h      |  2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c          | 14
-rw-r--r--  arch/powerpc/lib/locks.c               |  4
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c    |  4
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c   |  2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c   |  8
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c  |  4
7 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 14b592dfb4e..6b73554433a 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -153,6 +153,8 @@ struct lppaca {
 
 extern struct lppaca lppaca[];
 
+#define lppaca_of(cpu)	(lppaca[cpu])
+
 /*
  * SLB shadow buffer structure as defined in the PAPR. The save_area
  * contains adjacent ESID and VSID pairs for each shadowed SLB. The
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 50362b6ef6e..8d9e3b9cda6 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -56,7 +56,7 @@ static unsigned long get_purr(void)
 
 	for_each_possible_cpu(cpu) {
 		if (firmware_has_feature(FW_FEATURE_ISERIES))
-			sum_purr += lppaca[cpu].emulated_time_base;
+			sum_purr += lppaca_of(cpu).emulated_time_base;
 		else {
 			struct cpu_usage *cu;
 
@@ -263,7 +263,7 @@ static void parse_ppp_data(struct seq_file *m)
 		   ppp_data.active_system_procs);
 
 	/* pool related entries are apropriate for shared configs */
-	if (lppaca[0].shared_proc) {
+	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
 		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
@@ -460,8 +460,8 @@ static void pseries_cmo_data(struct seq_file *m)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		cmo_faults += lppaca[cpu].cmo_faults;
-		cmo_fault_time += lppaca[cpu].cmo_fault_time;
+		cmo_faults += lppaca_of(cpu).cmo_faults;
+		cmo_fault_time += lppaca_of(cpu).cmo_fault_time;
 	}
 
 	seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@@ -479,8 +479,8 @@ static void splpar_dispatch_data(struct seq_file *m)
 	unsigned long dispatch_dispersions = 0;
 
 	for_each_possible_cpu(cpu) {
-		dispatches += lppaca[cpu].yield_count;
-		dispatch_dispersions += lppaca[cpu].dispersion_count;
+		dispatches += lppaca_of(cpu).yield_count;
+		dispatch_dispersions += lppaca_of(cpu).dispersion_count;
 	}
 
 	seq_printf(m, "dispatches=%lu\n", dispatches);
@@ -545,7 +545,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	seq_printf(m, "partition_potential_processors=%d\n",
 		   partition_potential_processors);
 
-	seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
+	seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc);
 
 	seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 58e14fba11b..9b8182e8216 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -34,7 +34,7 @@ void __spin_yield(arch_spinlock_t *lock)
 		return;
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	yield_count = lppaca[holder_cpu].yield_count;
+	yield_count = lppaca_of(holder_cpu).yield_count;
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
@@ -65,7 +65,7 @@ void __rw_yield(arch_rwlock_t *rw)
 		return;		/* no write lock at present */
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	yield_count = lppaca[holder_cpu].yield_count;
+	yield_count = lppaca_of(holder_cpu).yield_count;
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index 7f45a51fe79..fdb7384c0c4 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -243,7 +243,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 	pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
 	for (i = 0; i < NR_CPUS; i++) {
-		if (lppaca[i].dyn_proc_status >= 2)
+		if (lppaca_of(i).dyn_proc_status >= 2)
 			continue;
 
 		snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
 		dt_prop_str(dt, "device_type", device_type_cpu);
 
-		index = lppaca[i].dyn_hv_phys_proc_index;
+		index = lppaca_of(i).dyn_hv_phys_proc_index;
 		d = &xIoHriProcessorVpd[index];
 
 		dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 6590850045a..6c6029914db 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -91,7 +91,7 @@ static void smp_iSeries_kick_cpu(int nr)
 	BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
 	/* Verify that our partition has a processor nr */
-	if (lppaca[nr].dyn_proc_status >= 2)
+	if (lppaca_of(nr).dyn_proc_status >= 2)
 		return;
 
 	/* The processor is currently spinning, waiting
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a00addb5594..adfd5441b61 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -107,14 +107,14 @@ static int dtl_enable(struct dtl *dtl)
 	}
 
 	/* set our initial buffer indices */
-	dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;
+	dtl->last_idx = lppaca_of(dtl->cpu).dtl_idx = 0;
 
 	/* ensure that our updates to the lppaca fields have occurred before
 	 * we actually enable the logging */
 	smp_wmb();
 
 	/* enable event logging */
-	lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;
+	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
 
 	return 0;
 }
@@ -123,7 +123,7 @@ static void dtl_disable(struct dtl *dtl)
 {
 	int hwcpu = get_hard_smp_processor_id(dtl->cpu);
 
-	lppaca[dtl->cpu].dtl_enable_mask = 0x0;
+	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
 
 	unregister_dtl(hwcpu, __pa(dtl->buf));
 
@@ -171,7 +171,7 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
 	/* actual number of entries read */
 	n_read = 0;
 
-	cur_idx = lppaca[dtl->cpu].dtl_idx;
+	cur_idx = lppaca_of(dtl->cpu).dtl_idx;
 	last_idx = dtl->last_idx;
 
 	if (cur_idx - last_idx > dtl->buf_entries) {
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index cf79b46d8f8..a17fe4a9059 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -250,9 +250,9 @@ void vpa_init(int cpu)
 	long ret;
 
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		lppaca[cpu].vmxregs_in_use = 1;
+		lppaca_of(cpu).vmxregs_in_use = 1;
 
-	addr = __pa(&lppaca[cpu]);
+	addr = __pa(&lppaca_of(cpu));
 	ret = register_vpa(hwcpu, addr);
 
 	if (ret) {
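
For reference, a minimal sketch (not part of the patch) of how code uses the new accessor: lppaca_of(cpu) currently just expands to lppaca[cpu], so per-CPU lppaca fields are read through the macro exactly as the converted call sites above do. The total_yield_count() helper below is hypothetical, modelled on the for_each_possible_cpu() loops in lparcfg.c.

	/* Hypothetical example, kernel context assumed; illustrates reading an
	 * lppaca field through the accessor introduced by this patch. */
	#include <linux/cpumask.h>
	#include <asm/lppaca.h>

	static unsigned long total_yield_count(void)
	{
		unsigned long sum = 0;
		int cpu;

		/* Same pattern as splpar_dispatch_data(): walk every possible
		 * CPU and read a field via lppaca_of() instead of lppaca[]. */
		for_each_possible_cpu(cpu)
			sum += lppaca_of(cpu).yield_count;

		return sum;
	}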