author	Ingo Molnar <mingo@elte.hu>	2010-10-30 04:43:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-30 04:43:08 -0400
commit	169ed55bd30305b933f52bfab32a58671d44ab68 (patch)
tree	32e280957474f458901abfce16fa2a1687ef7497 /arch/powerpc/kernel
parent	3d7851b3cdd43a734e5cc4c643fd886ab28ad4d5 (diff)
parent	45f81b1c96d9793e47ce925d257ea693ce0b193e (diff)
Merge branch 'tip/perf/jump-label-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/Makefile	2
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	25
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	6
-rw-r--r--	arch/powerpc/kernel/head_64.S	6
-rw-r--r--	arch/powerpc/kernel/ibmebus.c	11
-rw-r--r--	arch/powerpc/kernel/kvm.c	596
-rw-r--r--	arch/powerpc/kernel/kvm_emul.S	302
-rw-r--r--	arch/powerpc/kernel/legacy_serial.c	22
-rw-r--r--	arch/powerpc/kernel/lparcfg.c	1
-rw-r--r--	arch/powerpc/kernel/prom.c	12
-rw-r--r--	arch/powerpc/kernel/ptrace.c	66
-rw-r--r--	arch/powerpc/kernel/rtas_flash.c	3
-rw-r--r--	arch/powerpc/kernel/rtasd.c	1
-rw-r--r--	arch/powerpc/kernel/vio.c	4
14 files changed, 992 insertions(+), 65 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4ed076a4db24..36c30f31ec93 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -129,6 +129,8 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
 obj-y				+= ppc_save_regs.o
 endif
 
+obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
+
 # Disable GCOV in odd or sensitive code
 GCOV_PROFILE_prom_init.o := n
 GCOV_PROFILE_ftrace.o := n
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c3e01945ad4f..bd0df2e6aa8f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -48,11 +48,11 @@
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/alpaca.h>
 #endif
-#ifdef CONFIG_KVM
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
 #include <linux/kvm_host.h>
-#ifndef CONFIG_BOOKE
-#include <asm/kvm_book3s.h>
 #endif
-#endif
+#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
+#include <asm/kvm_book3s.h>
+#endif
 
 #ifdef CONFIG_PPC32
@@ -396,12 +396,13 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
@@ -466,6 +467,22 @@ int main(void)
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif
+
+#ifdef CONFIG_KVM_GUEST
+	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch1));
+	DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch2));
+	DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch3));
+	DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
+				       int_pending));
+	DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
+					    critical));
+	DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
+#endif
+
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39b0c48872d2..9f8b01d6466f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -299,6 +299,12 @@ slb_miss_user_pseries:
 	b	.	/* prevent spec. execution */
 #endif /* __DISABLED__ */
 
+/* KVM's trampoline code needs to be close to the interrupt handlers */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include "../kvm/book3s_rmhandlers.S"
+#endif
+
 	.align	7
 	.globl	__end_interrupts
 __end_interrupts:
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c571cd3c1453..f0dd577e4a5b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -166,12 +166,6 @@ exception_marker:
 #include "exceptions-64s.S"
 #endif
 
-/* KVM trampoline code needs to be close to the interrupt handlers */
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include "../kvm/book3s_rmhandlers.S"
-#endif
-
 _GLOBAL(generic_secondary_thread_init)
 	mr	r24,r3
 
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 9b626cfffce1..f62efdfd1769 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -162,13 +162,10 @@ static int ibmebus_create_device(struct device_node *dn)
 	dev->dev.bus = &ibmebus_bus_type;
 	dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
 
-	ret = of_device_register(dev);
-	if (ret) {
-		of_device_free(dev);
-		return ret;
-	}
-
-	return 0;
+	ret = of_device_add(dev);
+	if (ret)
+		platform_device_put(dev);
+	return ret;
 }
 
 static int ibmebus_create_devices(const struct of_device_id *matches)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
new file mode 100644
index 000000000000..428d0e538aec
--- /dev/null
+++ b/arch/powerpc/kernel/kvm.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ *     Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/init.h>
+#include <linux/kvm_para.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <asm/reg.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+#include <asm/disassemble.h>
+
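+/*
+ * The host maps a "magic page" of shared kvm_vcpu_arch_shared state at the
+ * top of the guest's effective address space (-4096), so privileged register
+ * state can be read and written with plain loads and stores instead of traps.
+ */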
+#define KVM_MAGIC_PAGE		(-4096L)
+#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
+
+#define KVM_INST_LWZ		0x80000000
+#define KVM_INST_STW		0x90000000
+#define KVM_INST_LD		0xe8000000
+#define KVM_INST_STD		0xf8000000
+#define KVM_INST_NOP		0x60000000
+#define KVM_INST_B		0x48000000
+#define KVM_INST_B_MASK		0x03ffffff
+#define KVM_INST_B_MAX		0x01ffffff
+
+#define KVM_MASK_RT		0x03e00000
+#define KVM_RT_30		0x03c00000
+#define KVM_MASK_RB		0x0000f800
+#define KVM_INST_MFMSR		0x7c0000a6
+#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
+#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
+#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
+#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
+#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
+#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
+#define KVM_INST_MFSPR_DAR	0x7c1302a6
+#define KVM_INST_MFSPR_DSISR	0x7c1202a6
+
+#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
+#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
+#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
+#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
+#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
+#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
+#define KVM_INST_MTSPR_DAR	0x7c1303a6
+#define KVM_INST_MTSPR_DSISR	0x7c1203a6
+
+#define KVM_INST_TLBSYNC	0x7c00046c
+#define KVM_INST_MTMSRD_L0	0x7c000164
+#define KVM_INST_MTMSRD_L1	0x7c010164
+#define KVM_INST_MTMSR		0x7c000124
+
+#define KVM_INST_WRTEEI_0	0x7c000146
+#define KVM_INST_WRTEEI_1	0x7c008146
+
+#define KVM_INST_MTSRIN		0x7c0001e4
+
+static bool kvm_patching_worked = true;
+static char kvm_tmp[1024 * 1024];
+static int kvm_tmp_index;
+
+static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
+{
+	*inst = new_inst;
+	flush_icache_range((ulong)inst, (ulong)inst + 4);
+}
+
+static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
+{
+#ifdef CONFIG_64BIT
+	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
+#else
+	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
+#endif
+}
+
+static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
+{
+#ifdef CONFIG_64BIT
+	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
+#else
+	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
+#endif
+}
+
+static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
+{
+	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
+}
+
+static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
+{
+#ifdef CONFIG_64BIT
+	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
+#else
+	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
+#endif
+}
+
+static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
+{
+	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
+}
+
+static void kvm_patch_ins_nop(u32 *inst)
+{
+	kvm_patch_ins(inst, KVM_INST_NOP);
+}
+
+static void kvm_patch_ins_b(u32 *inst, int addr)
+{
+#ifdef CONFIG_RELOCATABLE
+	/* On relocatable kernels interrupts handlers and our code
+	   can be in different regions, so we don't patch them */
+
+	extern u32 __end_interrupts;
+	if ((ulong)inst < (ulong)&__end_interrupts)
+		return;
+#endif
+
+	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
+}
+
+static u32 *kvm_alloc(int len)
+{
+	u32 *p;
+
+	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
+		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
+				kvm_tmp_index, len);
+		kvm_patching_worked = false;
+		return NULL;
+	}
+
+	p = (void*)&kvm_tmp[kvm_tmp_index];
+	kvm_tmp_index += len;
+
+	return p;
+}
+
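+/*
+ * Instructions that touch more state than a single load/store can express
+ * (mtmsrd, mtmsr, wrteei, mtsrin) are redirected to small emulation blobs:
+ * the template from kvm_emul.S is copied into kvm_tmp, its register operands
+ * and branch-back offset are fixed up, and the original instruction is
+ * replaced with a branch into the copy.
+ */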
+extern u32 kvm_emulate_mtmsrd_branch_offs;
+extern u32 kvm_emulate_mtmsrd_reg_offs;
+extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
+extern u32 kvm_emulate_mtmsrd_len;
+extern u32 kvm_emulate_mtmsrd[];
+
+static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Modify the chunk to fit the invocation */
+	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
+	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
+	switch (get_rt(rt)) {
+	case 30:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
+				 magic_var(scratch2), KVM_RT_30);
+		break;
+	case 31:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
+				 magic_var(scratch1), KVM_RT_30);
+		break;
+	default:
+		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
+		break;
+	}
+
+	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
+extern u32 kvm_emulate_mtmsr_branch_offs;
+extern u32 kvm_emulate_mtmsr_reg1_offs;
+extern u32 kvm_emulate_mtmsr_reg2_offs;
+extern u32 kvm_emulate_mtmsr_orig_ins_offs;
+extern u32 kvm_emulate_mtmsr_len;
+extern u32 kvm_emulate_mtmsr[];
+
+static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Modify the chunk to fit the invocation */
+	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
+	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
+
+	/* Make clobbered registers work too */
+	switch (get_rt(rt)) {
+	case 30:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
+				 magic_var(scratch2), KVM_RT_30);
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
+				 magic_var(scratch2), KVM_RT_30);
+		break;
+	case 31:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
+				 magic_var(scratch1), KVM_RT_30);
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
+				 magic_var(scratch1), KVM_RT_30);
+		break;
+	default:
+		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
+		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
+		break;
+	}
+
+	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
+#ifdef CONFIG_BOOKE
+
+extern u32 kvm_emulate_wrteei_branch_offs;
+extern u32 kvm_emulate_wrteei_ee_offs;
+extern u32 kvm_emulate_wrteei_len;
+extern u32 kvm_emulate_wrteei[];
+
+static void kvm_patch_ins_wrteei(u32 *inst)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_wrteei_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Modify the chunk to fit the invocation */
+	memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
+	p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
+	p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+
+extern u32 kvm_emulate_mtsrin_branch_offs;
+extern u32 kvm_emulate_mtsrin_reg1_offs;
+extern u32 kvm_emulate_mtsrin_reg2_offs;
+extern u32 kvm_emulate_mtsrin_orig_ins_offs;
+extern u32 kvm_emulate_mtsrin_len;
+extern u32 kvm_emulate_mtsrin[];
+
+static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Modify the chunk to fit the invocation */
+	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
+	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
+	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
+	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
+	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
+#endif
+
+static void kvm_map_magic_page(void *data)
+{
+	u32 *features = data;
+
+	ulong in[8];
+	ulong out[8];
+
+	in[0] = KVM_MAGIC_PAGE;
+	in[1] = KVM_MAGIC_PAGE;
+
+	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+
+	*features = out[0];
+}
+
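+/*
+ * Inspect one kernel instruction and, if it is one we know how to
+ * paravirtualize, patch it in place: mfmsr/mfspr become loads from the
+ * magic page, mtspr becomes a store, tlbsync becomes a nop, and the
+ * mtmsr[d]/wrteei/mtsrin family branches out to the emulation blobs.
+ */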
+static void kvm_check_ins(u32 *inst, u32 features)
+{
+	u32 _inst = *inst;
+	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
+	u32 inst_rt = _inst & KVM_MASK_RT;
+
+	switch (inst_no_rt) {
+	/* Loads */
+	case KVM_INST_MFMSR:
+		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SPRG0:
+		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SPRG1:
+		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SPRG2:
+		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SPRG3:
+		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SRR0:
+		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
+		break;
+	case KVM_INST_MFSPR_SRR1:
+		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
+		break;
+	case KVM_INST_MFSPR_DAR:
+		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
+		break;
+	case KVM_INST_MFSPR_DSISR:
+		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
+		break;
+
+	/* Stores */
+	case KVM_INST_MTSPR_SPRG0:
+		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
+		break;
+	case KVM_INST_MTSPR_SPRG1:
+		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
+		break;
+	case KVM_INST_MTSPR_SPRG2:
+		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
+		break;
+	case KVM_INST_MTSPR_SPRG3:
+		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
+		break;
+	case KVM_INST_MTSPR_SRR0:
+		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
+		break;
+	case KVM_INST_MTSPR_SRR1:
+		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
+		break;
+	case KVM_INST_MTSPR_DAR:
+		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
+		break;
+	case KVM_INST_MTSPR_DSISR:
+		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
+		break;
+
+	/* Nops */
+	case KVM_INST_TLBSYNC:
+		kvm_patch_ins_nop(inst);
+		break;
+
+	/* Rewrites */
+	case KVM_INST_MTMSRD_L1:
+		kvm_patch_ins_mtmsrd(inst, inst_rt);
+		break;
+	case KVM_INST_MTMSR:
+	case KVM_INST_MTMSRD_L0:
+		kvm_patch_ins_mtmsr(inst, inst_rt);
+		break;
+	}
+
+	switch (inst_no_rt & ~KVM_MASK_RB) {
+#ifdef CONFIG_PPC_BOOK3S_32
+	case KVM_INST_MTSRIN:
+		if (features & KVM_MAGIC_FEAT_SR) {
+			u32 inst_rb = _inst & KVM_MASK_RB;
+			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
+		}
+		break;
+		break;
+#endif
+	}
+
+	switch (_inst) {
+#ifdef CONFIG_BOOKE
+	case KVM_INST_WRTEEI_0:
+	case KVM_INST_WRTEEI_1:
+		kvm_patch_ins_wrteei(inst);
+		break;
+#endif
+	}
+}
+
+static void kvm_use_magic_page(void)
+{
+	u32 *p;
+	u32 *start, *end;
+	u32 tmp;
+	u32 features;
+
+	/* Tell the host to map the magic page to -4096 on all CPUs */
+	on_each_cpu(kvm_map_magic_page, &features, 1);
+
+	/* Quick self-test to see if the mapping works */
+	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Now loop through all code and find instructions */
+	start = (void*)_stext;
+	end = (void*)_etext;
+
+	for (p = start; p < end; p++)
+		kvm_check_ins(p, features);
+
+	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
+			 kvm_patching_worked ? "worked" : "failed");
+}
+
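+/*
+ * Hypercall ABI: up to eight arguments go in r3-r10 and the call number in
+ * r11; results come back in r3 (return value) and r4-r11. The instructions
+ * at kvm_hypercall_start are patched in from the device tree's
+ * "hcall-instructions" property by kvm_para_setup() below.
+ */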
+unsigned long kvm_hypercall(unsigned long *in,
+			    unsigned long *out,
+			    unsigned long nr)
+{
+	unsigned long register r0 asm("r0");
+	unsigned long register r3 asm("r3") = in[0];
+	unsigned long register r4 asm("r4") = in[1];
+	unsigned long register r5 asm("r5") = in[2];
+	unsigned long register r6 asm("r6") = in[3];
+	unsigned long register r7 asm("r7") = in[4];
+	unsigned long register r8 asm("r8") = in[5];
+	unsigned long register r9 asm("r9") = in[6];
+	unsigned long register r10 asm("r10") = in[7];
+	unsigned long register r11 asm("r11") = nr;
+	unsigned long register r12 asm("r12");
+
+	asm volatile("bl	kvm_hypercall_start"
+		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+		       "=r"(r12)
+		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+		       "r"(r9), "r"(r10), "r"(r11)
+		     : "memory", "cc", "xer", "ctr", "lr");
+
+	out[0] = r4;
+	out[1] = r5;
+	out[2] = r6;
+	out[3] = r7;
+	out[4] = r8;
+	out[5] = r9;
+	out[6] = r10;
+	out[7] = r11;
+
+	return r3;
+}
+EXPORT_SYMBOL_GPL(kvm_hypercall);
+
+static int kvm_para_setup(void)
+{
+	extern u32 kvm_hypercall_start;
+	struct device_node *hyper_node;
+	u32 *insts;
+	int len, i;
+
+	hyper_node = of_find_node_by_path("/hypervisor");
+	if (!hyper_node)
+		return -1;
+
+	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
+	if (len % 4)
+		return -1;
+	if (len > (4 * 4))
+		return -1;
+
+	for (i = 0; i < (len / 4); i++)
+		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
+
+	return 0;
+}
+
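+/*
+ * kvm_tmp is a static 1MB buffer carved out of the kernel image; once
+ * patching is done, hand any untouched tail pages back to the allocator.
+ */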
+static __init void kvm_free_tmp(void)
+{
+	unsigned long start, end;
+
+	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
+	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
+
+	/* Free the tmp space we don't need */
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+	}
+}
+
+static int __init kvm_guest_init(void)
+{
+	if (!kvm_para_available())
+		goto free_tmp;
+
+	if (kvm_para_setup())
+		goto free_tmp;
+
+	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
+		kvm_use_magic_page();
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Enable napping */
+	powersave_nap = 1;
+#endif
+
+free_tmp:
+	kvm_free_tmp();
+
+	return 0;
+}
+
+postcore_initcall(kvm_guest_init);
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
new file mode 100644
index 000000000000..f2b1b2523e61
--- /dev/null
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -0,0 +1,302 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+
+/* Hypercall entry point. Will be patched with device tree instructions. */
+
+.global kvm_hypercall_start
+kvm_hypercall_start:
+	li	r3, -1
+	nop
+	nop
+	nop
+	blr
+
+#define KVM_MAGIC_PAGE		(-4096)
+
+#ifdef CONFIG_64BIT
+#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
+#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
+#else
+#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
+#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
+#endif
+
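+/*
+ * The emulation blobs may only clobber r30, r31 and CR, which are stashed
+ * in the magic page's scratch slots. Writing r1 to shared->critical opens
+ * a critical section during which the host holds off interrupt injection;
+ * writing r2 (always != r1) closes it again.
+ */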
+#define SCRATCH_SAVE							\
+	/* Enable critical section. We are critical if			\
+	   shared->critical == r1 */					\
+	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
+									\
+	/* Save state */						\
+	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
+	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
+	mfcr	r31;							\
+	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
+
+#define SCRATCH_RESTORE							\
+	/* Restore state */						\
+	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
+	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
+	mtcr	r30;							\
+	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
+									\
+	/* Disable critical section. We are critical if			\
+	   shared->critical == r1 and r2 is always != r1 */		\
+	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
+
+.global kvm_emulate_mtmsrd
+kvm_emulate_mtmsrd:
+
+	SCRATCH_SAVE
+
+	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+	lis	r30, (~(MSR_EE | MSR_RI))@h
+	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
+	and	r31, r31, r30
+
+	/* OR the register's (MSR_EE|MSR_RI) on MSR */
+kvm_emulate_mtmsrd_reg:
+	ori	r30, r0, 0
+	andi.	r30, r30, (MSR_EE|MSR_RI)
+	or	r31, r31, r30
+
+	/* Put MSR back into magic page */
+	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	/* Check if we have to fetch an interrupt */
+	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+	cmpwi	r31, 0
+	beq+	no_check
+
+	/* Check if we may trigger an interrupt */
+	andi.	r30, r30, MSR_EE
+	beq	no_check
+
+	SCRATCH_RESTORE
+
+	/* Nag hypervisor */
+kvm_emulate_mtmsrd_orig_ins:
+	tlbsync
+
+	b	kvm_emulate_mtmsrd_branch
+
+no_check:
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_mtmsrd_branch:
+	b	.
+kvm_emulate_mtmsrd_end:
+
+.global kvm_emulate_mtmsrd_branch_offs
+kvm_emulate_mtmsrd_branch_offs:
+	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4
+
+.global kvm_emulate_mtmsrd_reg_offs
+kvm_emulate_mtmsrd_reg_offs:
+	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4
+
+.global kvm_emulate_mtmsrd_orig_ins_offs
+kvm_emulate_mtmsrd_orig_ins_offs:
+	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4
+
+.global kvm_emulate_mtmsrd_len
+kvm_emulate_mtmsrd_len:
+	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
+
+
+#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
+#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
+
+.global kvm_emulate_mtmsr
+kvm_emulate_mtmsr:
+
+	SCRATCH_SAVE
+
+	/* Fetch old MSR in r31 */
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	/* Find the changed bits between old and new MSR */
+kvm_emulate_mtmsr_reg1:
+	ori	r30, r0, 0
+	xor	r31, r30, r31
+
+	/* Check if we need to really do mtmsr */
+	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
+	and.	r31, r31, r30
+
+	/* No critical bits changed? Maybe we can stay in the guest. */
+	beq	maybe_stay_in_guest
+
+do_mtmsr:
+
+	SCRATCH_RESTORE
+
+	/* Just fire off the mtmsr if it's critical */
+kvm_emulate_mtmsr_orig_ins:
+	mtmsr	r0
+
+	b	kvm_emulate_mtmsr_branch
+
+maybe_stay_in_guest:
+
+	/* Get the target register in r30 */
+kvm_emulate_mtmsr_reg2:
+	ori	r30, r0, 0
+
+	/* Check if we have to fetch an interrupt */
+	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+	cmpwi	r31, 0
+	beq+	no_mtmsr
+
+	/* Check if we may trigger an interrupt */
+	andi.	r31, r30, MSR_EE
+	beq	no_mtmsr
+
+	b	do_mtmsr
+
+no_mtmsr:
+
+	/* Put MSR into magic page because we don't call mtmsr */
+	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_mtmsr_branch:
+	b	.
+kvm_emulate_mtmsr_end:
+
+.global kvm_emulate_mtmsr_branch_offs
+kvm_emulate_mtmsr_branch_offs:
+	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_reg1_offs
+kvm_emulate_mtmsr_reg1_offs:
+	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_reg2_offs
+kvm_emulate_mtmsr_reg2_offs:
+	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_orig_ins_offs
+kvm_emulate_mtmsr_orig_ins_offs:
+	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_len
+kvm_emulate_mtmsr_len:
+	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
+
+
+
+.global kvm_emulate_wrteei
+kvm_emulate_wrteei:
+
+	SCRATCH_SAVE
+
+	/* Fetch old MSR in r31 */
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	/* Remove MSR_EE from old MSR */
+	li	r30, 0
+	ori	r30, r30, MSR_EE
+	andc	r31, r31, r30
+
+	/* OR new MSR_EE onto the old MSR */
+kvm_emulate_wrteei_ee:
+	ori	r31, r31, 0
+
+	/* Write new MSR value back */
+	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_wrteei_branch:
+	b	.
+kvm_emulate_wrteei_end:
+
+.global kvm_emulate_wrteei_branch_offs
+kvm_emulate_wrteei_branch_offs:
+	.long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4
+
+.global kvm_emulate_wrteei_ee_offs
+kvm_emulate_wrteei_ee_offs:
+	.long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4
+
+.global kvm_emulate_wrteei_len
+kvm_emulate_wrteei_len:
+	.long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
+
+
+.global kvm_emulate_mtsrin
+kvm_emulate_mtsrin:
+
+	SCRATCH_SAVE
+
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+	andi.	r31, r31, MSR_DR | MSR_IR
+	beq	kvm_emulate_mtsrin_reg1
+
+	SCRATCH_RESTORE
+
+kvm_emulate_mtsrin_orig_ins:
+	nop
+	b	kvm_emulate_mtsrin_branch
+
+kvm_emulate_mtsrin_reg1:
+	/* rX >> 26 */
+	rlwinm	r30,r0,6,26,29
+
+kvm_emulate_mtsrin_reg2:
+	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_mtsrin_branch:
+	b	.
+kvm_emulate_mtsrin_end:
+
+.global kvm_emulate_mtsrin_branch_offs
+kvm_emulate_mtsrin_branch_offs:
+	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4
+
+.global kvm_emulate_mtsrin_reg1_offs
+kvm_emulate_mtsrin_reg1_offs:
+	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4
+
+.global kvm_emulate_mtsrin_reg2_offs
+kvm_emulate_mtsrin_reg2_offs:
+	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4
+
+.global kvm_emulate_mtsrin_orig_ins_offs
+kvm_emulate_mtsrin_orig_ins_offs:
+	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4
+
+.global kvm_emulate_mtsrin_len
+kvm_emulate_mtsrin_len:
+	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c1fd0f9658fd..c834757bebc0 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -52,14 +52,14 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 				  phys_addr_t taddr, unsigned long irq,
 				  upf_t flags, int irq_check_parent)
 {
-	const u32 *clk, *spd;
+	const __be32 *clk, *spd;
 	u32 clock = BASE_BAUD * 16;
 	int index;
 
 	/* get clock freq. if present */
 	clk = of_get_property(np, "clock-frequency", NULL);
 	if (clk && *clk)
-		clock = *clk;
+		clock = be32_to_cpup(clk);
 
 	/* get default speed if present */
 	spd = of_get_property(np, "current-speed", NULL);
@@ -109,7 +109,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 	legacy_serial_infos[index].taddr = taddr;
 	legacy_serial_infos[index].np = of_node_get(np);
 	legacy_serial_infos[index].clock = clock;
-	legacy_serial_infos[index].speed = spd ? *spd : 0;
+	legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0;
 	legacy_serial_infos[index].irq_check_parent = irq_check_parent;
 
 	printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
@@ -168,7 +168,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
 static int __init add_legacy_isa_port(struct device_node *np,
 				      struct device_node *isa_brg)
 {
-	const u32 *reg;
+	const __be32 *reg;
 	const char *typep;
 	int index = -1;
 	u64 taddr;
@@ -181,7 +181,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
 		return -1;
 
 	/* Verify it's an IO port, we don't support anything else */
-	if (!(reg[0] & 0x00000001))
+	if (!(be32_to_cpu(reg[0]) & 0x00000001))
 		return -1;
 
 	/* Now look for an "ibm,aix-loc" property that gives us ordering
@@ -202,7 +202,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
 		taddr = 0;
 
 	/* Add port, irq will be dealt with later */
-	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
+	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr,
 			       NO_IRQ, UPF_BOOT_AUTOCONF, 0);
 
 }
@@ -251,9 +251,9 @@ static int __init add_legacy_pci_port(struct device_node *np,
 	 * we get to their "reg" property
 	 */
 	if (np != pci_dev) {
-		const u32 *reg = of_get_property(np, "reg", NULL);
-		if (reg && (*reg < 4))
-			index = lindex = *reg;
+		const __be32 *reg = of_get_property(np, "reg", NULL);
+		if (reg && (be32_to_cpup(reg) < 4))
+			index = lindex = be32_to_cpup(reg);
 	}
 
 	/* Local index means it's the Nth port in the PCI chip.  Unfortunately
@@ -507,7 +507,7 @@ static int __init check_legacy_serial_console(void)
 	struct device_node *prom_stdout = NULL;
 	int i, speed = 0, offset = 0;
 	const char *name;
-	const u32 *spd;
+	const __be32 *spd;
 
 	DBG(" -> check_legacy_serial_console()\n");
 
@@ -547,7 +547,7 @@ static int __init check_legacy_serial_console(void)
 	}
 	spd = of_get_property(prom_stdout, "current-speed", NULL);
 	if (spd)
-		speed = *spd;
+		speed = be32_to_cpup(spd);
 
 	if (strcmp(name, "serial") != 0)
 		goto not_found;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 8d9e3b9cda64..16468362ad57 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -780,6 +780,7 @@ static const struct file_operations lparcfg_fops = {
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
 	.release	= single_release,
+	.llseek		= seq_lseek,
 };
 
 static int __init lparcfg_init(void)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c3c6a8857544..9e3132db718b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -364,10 +364,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	return 0;
 }
 
-void __init early_init_dt_scan_chosen_arch(unsigned long node)
+int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
+					 int depth, void *data)
 {
 	unsigned long *lprop;
 
+	/* Use common scan routine to determine if this is the chosen node */
+	if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
+		return 0;
+
 #ifdef CONFIG_PPC64
 	/* check if iommu is forced on or off */
 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@@ -399,6 +404,9 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node)
 	if (lprop)
 		crashk_res.end = crashk_res.start + *lprop - 1;
 #endif
+
+	/* break now */
+	return 1;
 }
 
 #ifdef CONFIG_PPC_PSERIES
@@ -683,7 +691,7 @@ void __init early_init_devtree(void *params)
 	 * device-tree, including the platform type, initrd location and
 	 * size, TCE reserve, and more ...
 	 */
-	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
 	memblock_init();
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 286d9783d93f..a9b32967cff6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
  * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
  * we mark them as obsolete now, they will be removed in a future version
  */
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
-			    long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+			    unsigned long addr, unsigned long data)
 {
+	void __user *datavp = (void __user *) data;
+
 	switch (request) {
 	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_GPR, 0, 32 * sizeof(long),
-					   (void __user *) data);
+					   datavp);
 
 	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_GPR, 0, 32 * sizeof(long),
-					     (const void __user *) data);
+					     datavp);
 
 	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_FPR, 0, 32 * sizeof(double),
-					   (void __user *) data);
+					   datavp);
 
 	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_FPR, 0, 32 * sizeof(double),
-					     (const void __user *) data);
+					     datavp);
 	}
 
 	return -EPERM;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
 	int ret = -EPERM;
+	void __user *datavp = (void __user *) data;
+	unsigned long __user *datalp = datavp;
 
 	switch (request) {
 	/* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = -EIO;
 		/* convert to index and check */
 #ifdef CONFIG_PPC32
-		index = (unsigned long) addr >> 2;
+		index = addr >> 2;
 		if ((addr & 3) || (index > PT_FPSCR)
 		    || (child->thread.regs == NULL))
 #else
-		index = (unsigned long) addr >> 3;
+		index = addr >> 3;
 		if ((addr & 7) || (index > PT_FPSCR))
 #endif
 			break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			tmp = ((unsigned long *)child->thread.fpr)
 				[TS_FPRWIDTH * (index - PT_FPR0)];
 		}
-		ret = put_user(tmp,(unsigned long __user *) data);
+		ret = put_user(tmp, datalp);
 		break;
 	}
 
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = -EIO;
 		/* convert to index and check */
 #ifdef CONFIG_PPC32
-		index = (unsigned long) addr >> 2;
+		index = addr >> 2;
 		if ((addr & 3) || (index > PT_FPSCR)
 		    || (child->thread.regs == NULL))
 #else
-		index = (unsigned long) addr >> 3;
+		index = addr >> 3;
 		if ((addr & 7) || (index > PT_FPSCR))
 #endif
 			break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		dbginfo.features = 0;
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
-		if (!access_ok(VERIFY_WRITE, data,
+		if (!access_ok(VERIFY_WRITE, datavp,
 			       sizeof(struct ppc_debug_info)))
 			return -EFAULT;
-		ret = __copy_to_user((struct ppc_debug_info __user *)data,
-				     &dbginfo, sizeof(struct ppc_debug_info)) ?
+		ret = __copy_to_user(datavp, &dbginfo,
+				     sizeof(struct ppc_debug_info)) ?
 		      -EFAULT : 0;
 		break;
 	}
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case PPC_PTRACE_SETHWDEBUG: {
 		struct ppc_hw_breakpoint bp_info;
 
-		if (!access_ok(VERIFY_READ, data,
+		if (!access_ok(VERIFY_READ, datavp,
 			       sizeof(struct ppc_hw_breakpoint)))
 			return -EFAULT;
-		ret = __copy_from_user(&bp_info,
-				       (struct ppc_hw_breakpoint __user *)data,
+		ret = __copy_from_user(&bp_info, datavp,
 				       sizeof(struct ppc_hw_breakpoint)) ?
 		      -EFAULT : 0;
 		if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		if (addr > 0)
 			break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		ret = put_user(child->thread.dac1,
-			       (unsigned long __user *)data);
+		ret = put_user(child->thread.dac1, datalp);
 #else
-		ret = put_user(child->thread.dabr,
-			       (unsigned long __user *)data);
+		ret = put_user(child->thread.dabr, datalp);
 #endif
 		break;
 	}
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_GPR,
 					   0, sizeof(struct pt_regs),
-					   (void __user *) data);
+					   datavp);
 
 #ifdef CONFIG_PPC64
 	case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_GPR,
 					     0, sizeof(struct pt_regs),
-					     (const void __user *) data);
+					     datavp);
 
 	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_FPR,
 					   0, sizeof(elf_fpregset_t),
-					   (void __user *) data);
+					   datavp);
 
 	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_FPR,
 					     0, sizeof(elf_fpregset_t),
-					     (const void __user *) data);
+					     datavp);
 
 #ifdef CONFIG_ALTIVEC
 	case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 					   REGSET_VMX,
 					   0, (33 * sizeof(vector128) +
 					       sizeof(u32)),
-					   (void __user *) data);
+					   datavp);
 
 	case PTRACE_SETVRREGS:
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_VMX,
 					     0, (33 * sizeof(vector128) +
 						 sizeof(u32)),
-					     (const void __user *) data);
+					     datavp);
 #endif
 #ifdef CONFIG_VSX
 	case PTRACE_GETVSRREGS:
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_VSX,
 					   0, 32 * sizeof(double),
-					   (void __user *) data);
+					   datavp);
 
 	case PTRACE_SETVSRREGS:
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_VSX,
 					     0, 32 * sizeof(double),
-					     (const void __user *) data);
+					     datavp);
 #endif
 #ifdef CONFIG_SPE
 	case PTRACE_GETEVRREGS:
 		/* Get the child spe register state. */
 		return copy_regset_to_user(child, &user_ppc_native_view,
 					   REGSET_SPE, 0, 35 * sizeof(u32),
-					   (void __user *) data);
+					   datavp);
 
 	case PTRACE_SETEVRREGS:
 		/* Set the child spe register state. */
 		return copy_regset_from_user(child, &user_ppc_native_view,
 					     REGSET_SPE, 0, 35 * sizeof(u32),
-					     (const void __user *) data);
+					     datavp);
 #endif
 
 	/* Old reverse args ptrace callss */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 67a84d8f118d..2b442e6c21e6 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -716,6 +716,7 @@ static const struct file_operations rtas_flash_operations = {
 	.write		= rtas_flash_write,
 	.open		= rtas_excl_open,
 	.release	= rtas_flash_release,
+	.llseek		= default_llseek,
 };
 
 static const struct file_operations manage_flash_operations = {
@@ -724,6 +725,7 @@ static const struct file_operations manage_flash_operations = {
 	.write		= manage_flash_write,
 	.open		= rtas_excl_open,
 	.release	= rtas_excl_release,
+	.llseek		= default_llseek,
 };
 
 static const struct file_operations validate_flash_operations = {
@@ -732,6 +734,7 @@ static const struct file_operations validate_flash_operations = {
 	.write		= validate_flash_write,
 	.open		= rtas_excl_open,
 	.release	= validate_flash_release,
+	.llseek		= default_llseek,
 };
 
 static int __init rtas_flash_init(void)
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 638883e23e3a..0438f819fe6b 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -354,6 +354,7 @@ static const struct file_operations proc_rtas_log_operations = {
 	.poll		= rtas_log_poll,
 	.open		= rtas_log_open,
 	.release	= rtas_log_release,
+	.llseek		= noop_llseek,
 };
 
 static int enable_surveillance(int timeout)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d692989a4318..441d2a722f06 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
 	 * memory in this pool does not change.
 	 */
 	if (spare_needed && reserve_freed) {
-		tmp = min(spare_needed, min(reserve_freed,
-			  (viodev->cmo.entitled -
-			   VIO_CMO_MIN_ENT)));
+		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
 
 		vio_cmo.spare += tmp;
 		viodev->cmo.entitled -= tmp;