Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/archrandom.h | 32
-rw-r--r--  arch/powerpc/include/asm/checksum.h | 5
-rw-r--r--  arch/powerpc/include/asm/disassemble.h | 4
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h | 1
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 21
-rw-r--r--  arch/powerpc/include/asm/fsl_ifc.h | 2
-rw-r--r--  arch/powerpc/include/asm/hvsi.h | 16
-rw-r--r--  arch/powerpc/include/asm/io.h | 69
-rw-r--r--  arch/powerpc/include/asm/irq.h | 4
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 232
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 9
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 57
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 107
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 12
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 16
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 4
-rw-r--r--  arch/powerpc/include/asm/opal.h | 109
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/include/asm/page.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 8
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 142
-rw-r--r--  arch/powerpc/include/asm/processor.h | 93
-rw-r--r--  arch/powerpc/include/asm/prom.h | 36
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg.h | 22
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 8
-rw-r--r--  arch/powerpc/include/asm/scom.h | 23
-rw-r--r--  arch/powerpc/include/asm/setup.h | 4
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h | 2
-rw-r--r--  arch/powerpc/include/asm/spu.h | 3
-rw-r--r--  arch/powerpc/include/asm/string.h | 4
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 1
-rw-r--r--  arch/powerpc/include/asm/uprobes.h | 8
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h | 78
-rw-r--r--  arch/powerpc/include/asm/xor.h | 67
-rw-r--r--  arch/powerpc/include/uapi/asm/byteorder.h | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 86
-rw-r--r--  arch/powerpc/include/uapi/asm/socket.h | 2
46 files changed, 805 insertions(+), 525 deletions(-)
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 704e6f10ae80..d8f9d2f18a23 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,4 +2,5 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
+generic-y += preempt.h
 generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 000000000000..d853d163ba47
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+	if (ppc_md.get_random_long)
+		return ppc_md.get_random_long(v);
+
+	return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+	unsigned long val;
+	int rc;
+
+	rc = arch_get_random_long(&val);
+	if (rc)
+		*v = val;
+
+	return rc;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
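How this hooks together, as a sketch under assumptions (only ppc_md.get_random_long and powernv_get_random_long() appear in this diff; the init function below is illustrative): the platform installs its RNG callback in ppc_md, and generic code then probes it through arch_get_random_long().

	/* Sketch only: platform setup installing the RNG hook; name and
	 * placement are assumptions, not part of this patch. */
	static __init int example_rng_init(void)
	{
		ppc_md.get_random_long = powernv_get_random_long;
		return 0;
	}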
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ce0c28495f9a..8251a3ba870f 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -14,6 +14,9 @@
  * which always checksum on 4 octet boundaries. ihl is the number
  * of 32-bit words and is always >= 5.
  */
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 	return sum;
 #endif
 }
+
+#endif
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 5a8b82aa7241..4358e3002f35 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -43,6 +43,7 @@ extern struct ppc_emulated {
 	struct ppc_emulated_entry popcntb;
 	struct ppc_emulated_entry spe;
 	struct ppc_emulated_entry string;
+	struct ppc_emulated_entry sync;
 	struct ppc_emulated_entry unaligned;
 #ifdef CONFIG_MATH_EMULATION
 	struct ppc_emulated_entry math;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..894662a5d4d5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come in to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@ do_kvm_##n: \
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
@@ -236,7 +255,7 @@ do_kvm_##n: \
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b8a4b9bc50b3..f49ddb1b2273 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -93,6 +93,7 @@
 #define CSOR_NAND_PGS_512		0x00000000
 #define CSOR_NAND_PGS_2K		0x00080000
 #define CSOR_NAND_PGS_4K		0x00100000
+#define CSOR_NAND_PGS_8K		0x00180000
 /* Spare region Size */
 #define CSOR_NAND_SPRZ_MASK		0x0000E000
 #define CSOR_NAND_SPRZ_SHIFT		13
@@ -102,6 +103,7 @@
 #define CSOR_NAND_SPRZ_210		0x00006000
 #define CSOR_NAND_SPRZ_218		0x00008000
 #define CSOR_NAND_SPRZ_224		0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT		0x0000C000
 /* Pages Per Block */
 #define CSOR_NAND_PB_MASK		0x00000700
 #define CSOR_NAND_PB_SHIFT		8
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
index d3f64f361814..d4a5315718ca 100644
--- a/arch/powerpc/include/asm/hvsi.h
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -25,7 +25,7 @@
 struct hvsi_header {
 	uint8_t  type;
 	uint8_t  len;
-	uint16_t seqno;
+	__be16 seqno;
 } __attribute__((packed));
 
 struct hvsi_data {
@@ -35,24 +35,24 @@ struct hvsi_data {
 
 struct hvsi_control {
 	struct hvsi_header hdr;
-	uint16_t verb;
+	__be16 verb;
 	/* optional depending on verb: */
-	uint32_t word;
-	uint32_t mask;
+	__be32 word;
+	__be32 mask;
 } __attribute__((packed));
 
 struct hvsi_query {
 	struct hvsi_header hdr;
-	uint16_t verb;
+	__be16 verb;
 } __attribute__((packed));
 
 struct hvsi_query_response {
 	struct hvsi_header hdr;
-	uint16_t verb;
-	uint16_t query_seqno;
+	__be16 verb;
+	__be16 query_seqno;
 	union {
 		uint8_t  version;
-		uint32_t mctrl_word;
+		__be32 mctrl_word;
 	} u;
 } __attribute__((packed));
 
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 5a64757dc0d1..575fbf81fad0 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;
 /*
  * has legacy ISA devices ?
  */
-#define arch_has_dev_port()	(isa_bridge_pcidev != NULL)
+#define arch_has_dev_port()	(isa_bridge_pcidev != NULL || isa_io_special)
 #endif
 
 #include <linux/device.h>
@@ -113,7 +113,7 @@ extern bool isa_io_special;
 
 /* gcc 4.0 and older doesn't have 'Z' constraint */
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn)				\
+#define DEF_MMIO_IN_X(name, size, insn)					\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
 	u##size ret;							\
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret;							\
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)				\
+#define DEF_MMIO_OUT_X(name, size, insn)				\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn" %1,0,%2"			\
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 	IO_SET_SYNC_FLAG();						\
 }
 #else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn)				\
+#define DEF_MMIO_IN_X(name, size, insn)					\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
 	u##size ret;							\
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret;							\
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)				\
+#define DEF_MMIO_OUT_X(name, size, insn)				\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn" %1,%y0"			\
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
148} 148}
149#endif 149#endif
150 150
151#define DEF_MMIO_IN_BE(name, size, insn) \ 151#define DEF_MMIO_IN_D(name, size, insn) \
152static inline u##size name(const volatile u##size __iomem *addr) \ 152static inline u##size name(const volatile u##size __iomem *addr) \
153{ \ 153{ \
154 u##size ret; \ 154 u##size ret; \
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret;							\
 }
 
-#define DEF_MMIO_OUT_BE(name, size, insn)				\
+#define DEF_MMIO_OUT_D(name, size, insn)				\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 	IO_SET_SYNC_FLAG();						\
 }
 
+DEF_MMIO_IN_D(in_8,     8, lbz);
+DEF_MMIO_OUT_D(out_8,   8, stb);
 
-DEF_MMIO_IN_BE(in_8,     8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
 
-DEF_MMIO_OUT_BE(out_8,     8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
 
 #ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
 
 /* There are no asm instructions for 64-bit reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
 	out_be64(addr, swab64(val));
 }
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+	return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+	out_le64(addr, swab64(val));
+}
+
+#endif
 #endif /* __powerpc64__ */
 
 /*
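Net effect of the _D (displacement-form) and _X (indexed, byte-reversing form) split: the be/le accessors keep a fixed-endian meaning on both kernel endiannesses, with lhbrx/sthbrx/lwbrx/stwbrx handling whichever byte order is foreign. A sketch of a caller, with a hypothetical device whose registers are big-endian at offset 0x10:

	/* Sketch: fixed-endian register access, same source for BE and LE
	 * kernels.  The device layout is assumed for illustration. */
	static u32 example_read_devreg(void __iomem *regs)
	{
		out_be32(regs + 0x10, 1);	/* register assumed big-endian */
		return in_be32(regs + 0x10);	/* byteswapped automatically on LE */
	}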
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-			   struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..4a594b76674d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..0bd9348a4db9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS			64
 
-#define KVMPPC_INST_EHPRIV	0x7c00021c
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
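A quick encoding check (standalone userspace sketch, not part of the patch): OR-ing OC = 1 into the base ehpriv opcode gives 0x7c000a1c, and the get_oc() helper added to asm/disassemble.h above recovers the same OC field.

	#include <stdio.h>
	#include <stdint.h>

	#define KVMPPC_INST_EHPRIV	0x7c00021c	/* mirrored from the patch */
	#define EHPRIV_OC_SHIFT		11
	#define EHPRIV_OC_DEBUG		1

	/* same extraction as get_oc() in asm/disassemble.h */
	static unsigned int get_oc(uint32_t inst)
	{
		return (inst >> 11) & 0x7fff;
	}

	int main(void)
	{
		uint32_t inst = KVMPPC_INST_EHPRIV |
				(EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT);
		printf("0x%08x OC=%u\n", inst, get_oc(inst));	/* 0x7c000a1c OC=1 */
		return 0;
	}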
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 33283532e9d8..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #endif
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -89,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -224,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -256,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -267,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -294,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -340,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -402,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -463,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -498,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers */
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -582,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id) \
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 	__v;							\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
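The kvm_ppc.h changes all serve one move: routing backend calls that used to be selected at compile time through the per-VM kvm->arch.kvm_ops table, so the HV and PR implementations can coexist in one kernel. A minimal sketch of the resulting dispatch pattern (the wrapper name is hypothetical; kvmppc_ops and is_kvmppc_hv_enabled() come from the hunks above):

	/* Sketch: core code dispatching through the per-VM ops table. */
	static int example_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	{
		if (is_kvmppc_hv_enabled(kvm))
			pr_debug("VM runs on the HV backend\n");
		return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
	}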
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 4470d1e34d23..844c28de7ec0 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -84,8 +84,8 @@ struct lppaca {
 	 * the processor is yielded (either because of an OS yield or a
 	 * hypervisor preempt).  An even value implies that the processor is
 	 * currently executing.
-	 * NOTE: This value will ALWAYS be zero for dedicated processors and
-	 * will NEVER be zero for shared processors (ie, initialized to a 1).
+	 * NOTE: Even dedicated processor partitions can yield so this
+	 * field cannot be used to determine if we are shared or dedicated.
 	 */
 	volatile __be32 yield_count;
 	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
@@ -106,15 +106,15 @@ extern struct lppaca lppaca[];
106#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr) 106#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
107 107
108/* 108/*
109 * Old kernels used a reserved bit in the VPA to determine if it was running 109 * We are using a non architected field to determine if a partition is
110 * in shared processor mode. New kernels look for a non zero yield count 110 * shared or dedicated. This currently works on both KVM and PHYP, but
111 * but KVM still needs to set the bit to keep the old stuff happy. 111 * we will have to transition to something better.
112 */ 112 */
113#define LPPACA_OLD_SHARED_PROC 2 113#define LPPACA_OLD_SHARED_PROC 2
114 114
115static inline bool lppaca_shared_proc(struct lppaca *l) 115static inline bool lppaca_shared_proc(struct lppaca *l)
116{ 116{
117 return l->yield_count != 0; 117 return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
118} 118}
119 119
120/* 120/*
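
With yield_count no longer telling shared and dedicated partitions apart, lppaca_shared_proc() keeps its one-line form but now tests the old status bit instead. A usage sketch (get_lppaca() assumed available from the paca accessors):

    /* Sketch: only use yield/confer heuristics on shared processors. */
    if (lppaca_shared_proc(get_lppaca())) {
            /* spin less aggressively, consider conferring cycles */
    }
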
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8b480901165a..ad3025d0880b 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -78,6 +78,18 @@ struct machdep_calls {
78 long index); 78 long index);
79 void (*tce_flush)(struct iommu_table *tbl); 79 void (*tce_flush)(struct iommu_table *tbl);
80 80
81 /* _rm versions are for real mode use only */
82 int (*tce_build_rm)(struct iommu_table *tbl,
83 long index,
84 long npages,
85 unsigned long uaddr,
86 enum dma_data_direction direction,
87 struct dma_attrs *attrs);
88 void (*tce_free_rm)(struct iommu_table *tbl,
89 long index,
90 long npages);
91 void (*tce_flush_rm)(struct iommu_table *tbl);
92
81 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 93 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
82 unsigned long flags, void *caller); 94 unsigned long flags, void *caller);
83 void (*iounmap)(volatile void __iomem *token); 95 void (*iounmap)(volatile void __iomem *token);
@@ -263,6 +275,10 @@ struct machdep_calls {
263 ssize_t (*cpu_probe)(const char *, size_t); 275 ssize_t (*cpu_probe)(const char *, size_t);
264 ssize_t (*cpu_release)(const char *, size_t); 276 ssize_t (*cpu_release)(const char *, size_t);
265#endif 277#endif
278
279#ifdef CONFIG_ARCH_RANDOM
280 int (*get_random_long)(unsigned long *v);
281#endif
266}; 282};
267 283
268extern void e500_idle(void); 284extern void e500_idle(void);
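
The new get_random_long hook is the glue behind arch_get_random_long(): platform code publishes its hardware RNG at setup time. A sketch, where rng_setup() is an illustrative name rather than a real kernel symbol:

    #ifdef CONFIG_ARCH_RANDOM
    static int __init rng_setup(void)
    {
            /* powernv_get_random_long is the OPAL-backed implementation */
            ppc_md.get_random_long = powernv_get_random_long;
            return 0;
    }
    #endif
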
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c4cf01197273..807014dde821 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -135,8 +135,8 @@ extern char initial_stab[];
135#ifndef __ASSEMBLY__ 135#ifndef __ASSEMBLY__
136 136
137struct hash_pte { 137struct hash_pte {
138 unsigned long v; 138 __be64 v;
139 unsigned long r; 139 __be64 r;
140}; 140};
141 141
142extern struct hash_pte *htab_address; 142extern struct hash_pte *htab_address;
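
Typing the HPTE words as __be64 records that the hash page table is always big-endian in memory, even under a little-endian kernel, so every access needs an explicit conversion. A minimal sketch:

    /* Sketch: correct HPTE access on either endianness. */
    static inline unsigned long hpte_v_word(struct hash_pte *hpte)
    {
            return be64_to_cpu(hpte->v);    /* byte-swaps only on LE builds */
    }
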
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index c5cd72833d6e..033c06be1d84 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -129,6 +129,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
129#define OPAL_LPC_READ 67 129#define OPAL_LPC_READ 67
130#define OPAL_LPC_WRITE 68 130#define OPAL_LPC_WRITE 68
131#define OPAL_RETURN_CPU 69 131#define OPAL_RETURN_CPU 69
132#define OPAL_FLASH_VALIDATE 76
133#define OPAL_FLASH_MANAGE 77
134#define OPAL_FLASH_UPDATE 78
132 135
133#ifndef __ASSEMBLY__ 136#ifndef __ASSEMBLY__
134 137
@@ -460,10 +463,12 @@ enum {
460 463
461enum { 464enum {
462 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1, 465 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
466 OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
463}; 467};
464 468
465enum { 469enum {
466 OPAL_P7IOC_NUM_PEST_REGS = 128, 470 OPAL_P7IOC_NUM_PEST_REGS = 128,
471 OPAL_PHB3_NUM_PEST_REGS = 256
467}; 472};
468 473
469struct OpalIoPhbErrorCommon { 474struct OpalIoPhbErrorCommon {
@@ -531,28 +536,94 @@ struct OpalIoP7IOCPhbErrorData {
531 uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS]; 536 uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
532}; 537};
533 538
539struct OpalIoPhb3ErrorData {
540 struct OpalIoPhbErrorCommon common;
541
542 uint32_t brdgCtl;
543
544 /* PHB3 UTL regs */
545 uint32_t portStatusReg;
546 uint32_t rootCmplxStatus;
547 uint32_t busAgentStatus;
548
549 /* PHB3 cfg regs */
550 uint32_t deviceStatus;
551 uint32_t slotStatus;
552 uint32_t linkStatus;
553 uint32_t devCmdStatus;
554 uint32_t devSecStatus;
555
556 /* cfg AER regs */
557 uint32_t rootErrorStatus;
558 uint32_t uncorrErrorStatus;
559 uint32_t corrErrorStatus;
560 uint32_t tlpHdr1;
561 uint32_t tlpHdr2;
562 uint32_t tlpHdr3;
563 uint32_t tlpHdr4;
564 uint32_t sourceId;
565
566 uint32_t rsv3;
567
568 /* Record data about the call to allocate a buffer */
569 uint64_t errorClass;
570 uint64_t correlator;
571
572 uint64_t nFir; /* 000 */
573 uint64_t nFirMask; /* 003 */
574 uint64_t nFirWOF; /* 008 */
575
576 /* PHB3 MMIO Error Regs */
577 uint64_t phbPlssr; /* 120 */
578 uint64_t phbCsr; /* 110 */
579 uint64_t lemFir; /* C00 */
580 uint64_t lemErrorMask; /* C18 */
581 uint64_t lemWOF; /* C40 */
582 uint64_t phbErrorStatus; /* C80 */
583 uint64_t phbFirstErrorStatus; /* C88 */
584 uint64_t phbErrorLog0; /* CC0 */
585 uint64_t phbErrorLog1; /* CC8 */
586 uint64_t mmioErrorStatus; /* D00 */
587 uint64_t mmioFirstErrorStatus; /* D08 */
588 uint64_t mmioErrorLog0; /* D40 */
589 uint64_t mmioErrorLog1; /* D48 */
590 uint64_t dma0ErrorStatus; /* D80 */
591 uint64_t dma0FirstErrorStatus; /* D88 */
592 uint64_t dma0ErrorLog0; /* DC0 */
593 uint64_t dma0ErrorLog1; /* DC8 */
594 uint64_t dma1ErrorStatus; /* E00 */
595 uint64_t dma1FirstErrorStatus; /* E08 */
596 uint64_t dma1ErrorLog0; /* E40 */
597 uint64_t dma1ErrorLog1; /* E48 */
598 uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
599 uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
600};
601
534typedef struct oppanel_line { 602typedef struct oppanel_line {
535 const char * line; 603 const char * line;
536 uint64_t line_len; 604 uint64_t line_len;
537} oppanel_line_t; 605} oppanel_line_t;
538 606
607/* /sys/firmware/opal */
608extern struct kobject *opal_kobj;
609
539/* API functions */ 610/* API functions */
540int64_t opal_console_write(int64_t term_number, int64_t *length, 611int64_t opal_console_write(int64_t term_number, __be64 *length,
541 const uint8_t *buffer); 612 const uint8_t *buffer);
542int64_t opal_console_read(int64_t term_number, int64_t *length, 613int64_t opal_console_read(int64_t term_number, __be64 *length,
543 uint8_t *buffer); 614 uint8_t *buffer);
544int64_t opal_console_write_buffer_space(int64_t term_number, 615int64_t opal_console_write_buffer_space(int64_t term_number,
545 int64_t *length); 616 __be64 *length);
546int64_t opal_rtc_read(uint32_t *year_month_day, 617int64_t opal_rtc_read(__be32 *year_month_day,
547 uint64_t *hour_minute_second_millisecond); 618 __be64 *hour_minute_second_millisecond);
548int64_t opal_rtc_write(uint32_t year_month_day, 619int64_t opal_rtc_write(uint32_t year_month_day,
549 uint64_t hour_minute_second_millisecond); 620 uint64_t hour_minute_second_millisecond);
550int64_t opal_cec_power_down(uint64_t request); 621int64_t opal_cec_power_down(uint64_t request);
551int64_t opal_cec_reboot(void); 622int64_t opal_cec_reboot(void);
552int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); 623int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
553int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); 624int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
554int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask); 625int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
555int64_t opal_poll_events(uint64_t *outstanding_event_mask); 626int64_t opal_poll_events(__be64 *outstanding_event_mask);
556int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr, 627int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
557 uint64_t tce_mem_size); 628 uint64_t tce_mem_size);
558int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr, 629int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
@@ -560,9 +631,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
560int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func, 631int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
561 uint64_t offset, uint8_t *data); 632 uint64_t offset, uint8_t *data);
562int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func, 633int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
563 uint64_t offset, uint16_t *data); 634 uint64_t offset, __be16 *data);
564int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func, 635int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
565 uint64_t offset, uint32_t *data); 636 uint64_t offset, __be32 *data);
566int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func, 637int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
567 uint64_t offset, uint8_t data); 638 uint64_t offset, uint8_t data);
568int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func, 639int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
@@ -570,14 +641,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
570int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func, 641int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
571 uint64_t offset, uint32_t data); 642 uint64_t offset, uint32_t data);
572int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority); 643int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
573int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority); 644int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
574int64_t opal_register_exception_handler(uint64_t opal_exception, 645int64_t opal_register_exception_handler(uint64_t opal_exception,
575 uint64_t handler_address, 646 uint64_t handler_address,
576 uint64_t glue_cache_line); 647 uint64_t glue_cache_line);
577int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number, 648int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
578 uint8_t *freeze_state, 649 uint8_t *freeze_state,
579 uint16_t *pci_error_type, 650 __be16 *pci_error_type,
580 uint64_t *phb_status); 651 __be64 *phb_status);
581int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number, 652int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
582 uint64_t eeh_action_token); 653 uint64_t eeh_action_token);
583int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state); 654int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -614,13 +685,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
614int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number, 685int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
615 uint32_t xive_num); 686 uint32_t xive_num);
616int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, 687int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
617 int32_t *interrupt_source_number); 688 __be32 *interrupt_source_number);
618int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, 689int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
619 uint8_t msi_range, uint32_t *msi_address, 690 uint8_t msi_range, __be32 *msi_address,
620 uint32_t *message_data); 691 __be32 *message_data);
621int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number, 692int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
622 uint32_t xive_num, uint8_t msi_range, 693 uint32_t xive_num, uint8_t msi_range,
623 uint64_t *msi_address, uint32_t *message_data); 694 __be64 *msi_address, __be32 *message_data);
624int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address); 695int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
625int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status); 696int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
626int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines); 697int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
@@ -642,7 +713,7 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
642int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); 713int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
643int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); 714int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
644int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); 715int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
645int64_t opal_get_epow_status(uint64_t *status); 716int64_t opal_get_epow_status(__be64 *status);
646int64_t opal_set_system_attention_led(uint8_t led_action); 717int64_t opal_set_system_attention_led(uint8_t led_action);
647int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, 718int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
648 uint16_t *pci_error_type, uint16_t *severity); 719 uint16_t *pci_error_type, uint16_t *severity);
@@ -656,6 +727,9 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
656 uint32_t addr, uint32_t data, uint32_t sz); 727 uint32_t addr, uint32_t data, uint32_t sz);
657int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, 728int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
658 uint32_t addr, uint32_t *data, uint32_t sz); 729 uint32_t addr, uint32_t *data, uint32_t sz);
730int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
731int64_t opal_manage_flash(uint8_t op);
732int64_t opal_update_flash(uint64_t blk_list);
659 733
660/* Internal functions */ 734/* Internal functions */
661extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); 735extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -684,6 +758,7 @@ extern int opal_set_rtc_time(struct rtc_time *tm);
684extern void opal_get_rtc_time(struct rtc_time *tm); 758extern void opal_get_rtc_time(struct rtc_time *tm);
685extern unsigned long opal_get_boot_time(void); 759extern unsigned long opal_get_boot_time(void);
686extern void opal_nvram_init(void); 760extern void opal_nvram_init(void);
761extern void opal_flash_init(void);
687 762
688extern int opal_machine_check(struct pt_regs *regs); 763extern int opal_machine_check(struct pt_regs *regs);
689 764
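
The wholesale switch of output pointers to __be32/__be64 reflects that OPAL itself runs big-endian and stores results in its own byte order; little-endian callers convert after the call returns. A hedged sketch against the new opal_poll_events() signature:

    /* Sketch: OPAL fills *mask with a big-endian value. */
    __be64 mask;
    u64 events = 0;

    if (opal_poll_events(&mask) == OPAL_SUCCESS)
            events = be64_to_cpu(mask);
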
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
166 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ 166 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
167 167
168#ifdef CONFIG_KVM_BOOK3S_HANDLER 168#ifdef CONFIG_KVM_BOOK3S_HANDLER
169#ifdef CONFIG_KVM_BOOK3S_PR 169#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
170 /* We use this to store guest state in */ 170 /* We use this to store guest state in */
171 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 171 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
172#endif 172#endif
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index b9f426212d3a..32e4e212b9c1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -78,7 +78,7 @@ extern unsigned int HPAGE_SHIFT;
78 * 78 *
79 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START 79 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
80 * 80 *
81 * There are two was to determine a physical address from a virtual one: 81 * There are two ways to determine a physical address from a virtual one:
82 * va = pa + PAGE_OFFSET - MEMORY_START 82 * va = pa + PAGE_OFFSET - MEMORY_START
83 * va = pa + KERNELBASE - PHYSICAL_START 83 * va = pa + KERNELBASE - PHYSICAL_START
84 * 84 *
@@ -403,7 +403,7 @@ void arch_free_page(struct page *page, int order);
403 403
404struct vm_area_struct; 404struct vm_area_struct;
405 405
406#ifdef CONFIG_PPC_64K_PAGES 406#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
407typedef pte_t *pgtable_t; 407typedef pte_t *pgtable_t;
408#else 408#else
409typedef struct page *pgtable_t; 409typedef struct page *pgtable_t;
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index f65e27b09bd3..16cb92d215d2 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
91 if (!pte) 91 if (!pte)
92 return NULL; 92 return NULL;
93 page = virt_to_page(pte); 93 page = virt_to_page(pte);
94 pgtable_page_ctor(page); 94 if (!pgtable_page_ctor(page)) {
95 __free_page(page);
96 return NULL;
97 }
95 return page; 98 return page;
96} 99}
97 100
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 46db09414a10..4a191c472867 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
394 hpte_slot_array[index] = hidx << 4 | 0x1 << 3; 394 hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
395} 395}
396 396
397struct page *realmode_pfn_to_page(unsigned long pfn);
398
397static inline char *get_hpte_slot_array(pmd_t *pmdp) 399static inline char *get_hpte_slot_array(pmd_t *pmdp)
398{ 400{
399 /* 401 /*
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d7fe9f5b46d4..3132bb9365f3 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -143,6 +143,8 @@
143#define PPC_INST_LSWX 0x7c00042a 143#define PPC_INST_LSWX 0x7c00042a
144#define PPC_INST_LWARX 0x7c000028 144#define PPC_INST_LWARX 0x7c000028
145#define PPC_INST_LWSYNC 0x7c2004ac 145#define PPC_INST_LWSYNC 0x7c2004ac
146#define PPC_INST_SYNC 0x7c0004ac
147#define PPC_INST_SYNC_MASK 0xfc0007fe
146#define PPC_INST_LXVD2X 0x7c000698 148#define PPC_INST_LXVD2X 0x7c000698
147#define PPC_INST_MCRXR 0x7c000400 149#define PPC_INST_MCRXR 0x7c000400
148#define PPC_INST_MCRXR_MASK 0xfc0007fe 150#define PPC_INST_MCRXR_MASK 0xfc0007fe
@@ -181,6 +183,7 @@
181#define PPC_INST_TLBIVAX 0x7c000624 183#define PPC_INST_TLBIVAX 0x7c000624
182#define PPC_INST_TLBSRX_DOT 0x7c0006a5 184#define PPC_INST_TLBSRX_DOT 0x7c0006a5
183#define PPC_INST_XXLOR 0xf0000510 185#define PPC_INST_XXLOR 0xf0000510
186#define PPC_INST_XXSWAPD 0xf0000250
184#define PPC_INST_XVCPSGNDP 0xf0000780 187#define PPC_INST_XVCPSGNDP 0xf0000780
185#define PPC_INST_TRECHKPT 0x7c0007dd 188#define PPC_INST_TRECHKPT 0x7c0007dd
186#define PPC_INST_TRECLAIM 0x7c00075d 189#define PPC_INST_TRECLAIM 0x7c00075d
@@ -200,6 +203,7 @@
200/* Misc instructions for BPF compiler */ 203/* Misc instructions for BPF compiler */
201#define PPC_INST_LD 0xe8000000 204#define PPC_INST_LD 0xe8000000
202#define PPC_INST_LHZ 0xa0000000 205#define PPC_INST_LHZ 0xa0000000
206#define PPC_INST_LHBRX 0x7c00062c
203#define PPC_INST_LWZ 0x80000000 207#define PPC_INST_LWZ 0x80000000
204#define PPC_INST_STD 0xf8000000 208#define PPC_INST_STD 0xf8000000
205#define PPC_INST_STDU 0xf8000001 209#define PPC_INST_STDU 0xf8000001
@@ -218,7 +222,7 @@
218#define PPC_INST_MULLW 0x7c0001d6 222#define PPC_INST_MULLW 0x7c0001d6
219#define PPC_INST_MULHWU 0x7c000016 223#define PPC_INST_MULHWU 0x7c000016
220#define PPC_INST_MULLI 0x1c000000 224#define PPC_INST_MULLI 0x1c000000
221#define PPC_INST_DIVWU 0x7c0003d6 225#define PPC_INST_DIVWU 0x7c000396
222#define PPC_INST_RLWINM 0x54000000 226#define PPC_INST_RLWINM 0x54000000
223#define PPC_INST_RLDICR 0x78000004 227#define PPC_INST_RLDICR 0x78000004
224#define PPC_INST_SLW 0x7c000030 228#define PPC_INST_SLW 0x7c000030
@@ -344,6 +348,8 @@
344 VSX_XX1((s), a, b)) 348 VSX_XX1((s), a, b))
345#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ 349#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
346 VSX_XX3((t), a, b)) 350 VSX_XX3((t), a, b))
351#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
352 VSX_XX3((t), a, a))
347#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \ 353#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
348 VSX_XX3((t), (a), (b)))) 354 VSX_XX3((t), (a), (b))))
349 355
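
The PPC_INST_DIVWU change is a genuine encoding fix: 0x7c0003d6 is divw (extended opcode 491), while divwu is extended opcode 459, i.e. 0x7c000396, so anything using the old constant actually emitted a signed divide. A small stand-alone check of the XO-form arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* XO-form: primary opcode 31 in the top 6 bits, extended opcode
     * in bits 21-30 (i.e. shifted left by one).
     */
    static uint32_t xo_form(uint32_t xo)
    {
            return (31u << 26) | (xo << 1);
    }

    int main(void)
    {
            assert(xo_form(459) == 0x7c000396);     /* divwu */
            assert(xo_form(491) == 0x7c0003d6);     /* divw  */
            return 0;
    }
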
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..3c1acc31a092 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
98#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) 98#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
99#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) 99#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
100 100
101#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) 101#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
102#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) 102#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
103#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) 103#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
104#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) 104#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
105#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) 105#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
106#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) 106#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
107#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) 107#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base)
108#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) 108#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
109#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) 109#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
110#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) 110#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
111#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base) 111#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
112#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) 112#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
113 113
114#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b 114#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b
115#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) 115#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
116#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base) 116#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
117#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base) 117#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
118#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base) 118#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
119#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) 119#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
120#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b 120#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b
121#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) 121#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
122#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base) 122#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
123#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base) 123#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
124#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) 124#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
125#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) 125#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
126 126
127/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in 127#ifdef __BIG_ENDIAN__
128 * thread_struct: 128#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base)
129 */ 129#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base)
130#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \ 130#else
131 8*TS_FPRWIDTH*(n)(base) 131#define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \
132#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \ 132 STXVD2X(n,b,base); \
133 SAVE_FPR_TRANSACT(n+1, base) 133 XXSWAPD(n,n)
134#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \
135 SAVE_2FPRS_TRANSACT(n+2, base)
136#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \
137 SAVE_4FPRS_TRANSACT(n+4, base)
138#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \
139 SAVE_8FPRS_TRANSACT(n+8, base)
140#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \
141 SAVE_16FPRS_TRANSACT(n+16, base)
142
143#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \
144 8*TS_FPRWIDTH*(n)(base)
145#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \
146 REST_FPR_TRANSACT(n+1, base)
147#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \
148 REST_2FPRS_TRANSACT(n+2, base)
149#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \
150 REST_4FPRS_TRANSACT(n+4, base)
151#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \
152 REST_8FPRS_TRANSACT(n+8, base)
153#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \
154 REST_16FPRS_TRANSACT(n+16, base)
155
156
157#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
158 stvx n,b,base
159#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \
160 SAVE_VR_TRANSACT(n+1,b,base)
161#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \
162 SAVE_2VRS_TRANSACT(n+2,b,base)
163#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \
164 SAVE_4VRS_TRANSACT(n+4,b,base)
165#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \
166 SAVE_8VRS_TRANSACT(n+8,b,base)
167#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \
168 SAVE_16VRS_TRANSACT(n+16,b,base)
169
170#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
171 lvx n,b,base
172#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \
173 REST_VR_TRANSACT(n+1,b,base)
174#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \
175 REST_2VRS_TRANSACT(n+2,b,base)
176#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \
177 REST_4VRS_TRANSACT(n+4,b,base)
178#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \
179 REST_8VRS_TRANSACT(n+8,b,base)
180#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \
181 REST_16VRS_TRANSACT(n+16,b,base)
182
183
184#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
185 STXVD2X(n,R##base,R##b)
186#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \
187 SAVE_VSR_TRANSACT(n+1,b,base)
188#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \
189 SAVE_2VSRS_TRANSACT(n+2,b,base)
190#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \
191 SAVE_4VSRS_TRANSACT(n+4,b,base)
192#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \
193 SAVE_8VSRS_TRANSACT(n+8,b,base)
194#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
195 SAVE_16VSRS_TRANSACT(n+16,b,base)
196
197#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
198 LXVD2X(n,R##base,R##b)
199#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \
200 REST_VSR_TRANSACT(n+1,b,base)
201#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \
202 REST_2VSRS_TRANSACT(n+2,b,base)
203#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \
204 REST_4VSRS_TRANSACT(n+4,b,base)
205#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \
206 REST_8VSRS_TRANSACT(n+8,b,base)
207#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
208 REST_16VSRS_TRANSACT(n+16,b,base)
209 134
135#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \
136 XXSWAPD(n,n)
137#endif
210/* Save the lower 32 VSRs in the thread VSR region */ 138/* Save the lower 32 VSRs in the thread VSR region */
211#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) 139#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
212#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) 140#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
213#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) 141#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
214#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) 142#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
215#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) 143#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
216#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) 144#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
217#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) 145#define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
218#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) 146#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
219#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) 147#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
220#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) 148#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
@@ -478,13 +406,6 @@ BEGIN_FTR_SECTION_NESTED(945) \
478 std ra,TASKTHREADPPR(rb); \ 406 std ra,TASKTHREADPPR(rb); \
479END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) 407END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
480 408
481#define RESTORE_PPR(ra, rb) \
482BEGIN_FTR_SECTION_NESTED(946) \
483 ld ra,PACACURRENT(r13); \
484 ld rb,TASKTHREADPPR(ra); \
485 mtspr SPRN_PPR,rb; /* Restore PPR */ \
486END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
487
488#endif 409#endif
489 410
490/* 411/*
@@ -832,6 +753,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
832#define N_SLINE 68 753#define N_SLINE 68
833#define N_SO 100 754#define N_SO 100
834 755
835#endif /* __ASSEMBLY__ */ 756/*
757 * Create an endian fixup trampoline
758 *
759 * This starts with a "tdi 0,0,0x48" instruction which is
760 * essentially a "trap never", and thus akin to a nop.
761 *
762 * The opcode for this instruction read with the wrong endian
763 * however results in a b . + 8
764 *
765 * So essentially we use that trick to execute the following
766 * trampoline in "reverse endian" if we are running with the
767 * MSR_LE bit set the "wrong" way for whatever endianness the
768 * kernel is built for.
769 */
836 770
771#ifdef CONFIG_PPC_BOOK3E
772#define FIXUP_ENDIAN
773#else
774#define FIXUP_ENDIAN \
775 tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
776 b $+36; /* Skip trampoline if endian is good */ \
777 .long 0x05009f42; /* bcl 20,31,$+4 */ \
778 .long 0xa602487d; /* mflr r10 */ \
779 .long 0x1c004a39; /* addi r10,r10,28 */ \
780 .long 0xa600607d; /* mfmsr r11 */ \
781 .long 0x01006b69; /* xori r11,r11,1 */ \
782 .long 0xa6035a7d; /* mtsrr0 r10 */ \
783 .long 0xa6037b7d; /* mtsrr1 r11 */ \
784 .long 0x2400004c /* rfid */
785#endif /* !CONFIG_PPC_BOOK3E */
786#endif /* __ASSEMBLY__ */
837#endif /* _ASM_POWERPC_PPC_ASM_H */ 787#endif /* _ASM_POWERPC_PPC_ASM_H */
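
FIXUP_ENDIAN hinges on tdi 0,0,0x48 (0x08000048) being the exact byte-swap of b .+8 (0x48000008): a CPU already in the kernel's endianness falls through the never-taken trap and takes the b $+36 over the trampoline, while a CPU in the opposite endianness decodes the same word as b .+8 and lands in the byte-reversed sequence that flips MSR_LE via rfid. The identity is easy to check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* "tdi 0,0,0x48" and "b .+8" are byte-swaps of each other */
            assert(__builtin_bswap32(0x08000048) == 0x48000008);
            return 0;
    }
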
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..fc14a38c7ccf 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -14,8 +14,18 @@
14 14
15#ifdef CONFIG_VSX 15#ifdef CONFIG_VSX
16#define TS_FPRWIDTH 2 16#define TS_FPRWIDTH 2
17
18#ifdef __BIG_ENDIAN__
19#define TS_FPROFFSET 0
20#define TS_VSRLOWOFFSET 1
21#else
22#define TS_FPROFFSET 1
23#define TS_VSRLOWOFFSET 0
24#endif
25
17#else 26#else
18#define TS_FPRWIDTH 1 27#define TS_FPRWIDTH 1
28#define TS_FPROFFSET 0
19#endif 29#endif
20 30
21#ifdef CONFIG_PPC64 31#ifdef CONFIG_PPC64
@@ -142,27 +152,22 @@ typedef struct {
142 unsigned long seg; 152 unsigned long seg;
143} mm_segment_t; 153} mm_segment_t;
144 154
145#define TS_FPROFFSET 0 155#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
146#define TS_VSRLOWOFFSET 1 156#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
147#define TS_FPR(i) fpr[i][TS_FPROFFSET]
148#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
149 157
150struct thread_struct { 158/* FP and VSX 0-31 register set */
151 unsigned long ksp; /* Kernel stack pointer */ 159struct thread_fp_state {
152 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ 160 u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
161 u64 fpscr; /* Floating point status */
162};
153 163
154#ifdef CONFIG_PPC64 164/* Complete AltiVec register set including VSCR */
155 unsigned long ksp_vsid; 165struct thread_vr_state {
156#endif 166 vector128 vr[32] __attribute__((aligned(16)));
157 struct pt_regs *regs; /* Pointer to saved register state */ 167 vector128 vscr __attribute__((aligned(16)));
158 mm_segment_t fs; /* for get_fs() validation */ 168};
159#ifdef CONFIG_BOOKE 169
160 /* BookE base exception scratch space; align on cacheline */ 170struct debug_reg {
161 unsigned long normsave[8] ____cacheline_aligned;
162#endif
163#ifdef CONFIG_PPC32
164 void *pgdir; /* root of page-table tree */
165#endif
166#ifdef CONFIG_PPC_ADV_DEBUG_REGS 171#ifdef CONFIG_PPC_ADV_DEBUG_REGS
167 /* 172 /*
168 * The following help to manage the use of Debug Control Registers 173 * The following help to manage the use of Debug Control Registers
@@ -199,13 +204,28 @@ struct thread_struct {
199 unsigned long dvc2; 204 unsigned long dvc2;
200#endif 205#endif
201#endif 206#endif
202 /* FP and VSX 0-31 register set */ 207};
203 double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
204 struct {
205 208
206 unsigned int pad; 209struct thread_struct {
207 unsigned int val; /* Floating point status */ 210 unsigned long ksp; /* Kernel stack pointer */
208 } fpscr; 211
212#ifdef CONFIG_PPC64
213 unsigned long ksp_vsid;
214#endif
215 struct pt_regs *regs; /* Pointer to saved register state */
216 mm_segment_t fs; /* for get_fs() validation */
217#ifdef CONFIG_BOOKE
218 /* BookE base exception scratch space; align on cacheline */
219 unsigned long normsave[8] ____cacheline_aligned;
220#endif
221#ifdef CONFIG_PPC32
222 void *pgdir; /* root of page-table tree */
223 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
224#endif
225 /* Debug Registers */
226 struct debug_reg debug;
227 struct thread_fp_state fp_state;
228 struct thread_fp_state *fp_save_area;
209 int fpexc_mode; /* floating-point exception mode */ 229 int fpexc_mode; /* floating-point exception mode */
210 unsigned int align_ctl; /* alignment handling control */ 230 unsigned int align_ctl; /* alignment handling control */
211#ifdef CONFIG_PPC64 231#ifdef CONFIG_PPC64
@@ -223,10 +243,8 @@ struct thread_struct {
223 struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ 243 struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
224 unsigned long trap_nr; /* last trap # on this thread */ 244 unsigned long trap_nr; /* last trap # on this thread */
225#ifdef CONFIG_ALTIVEC 245#ifdef CONFIG_ALTIVEC
226 /* Complete AltiVec register set */ 246 struct thread_vr_state vr_state;
227 vector128 vr[32] __attribute__((aligned(16))); 247 struct thread_vr_state *vr_save_area;
228 /* AltiVec status */
229 vector128 vscr __attribute__((aligned(16)));
230 unsigned long vrsave; 248 unsigned long vrsave;
231 int used_vr; /* set if process has used altivec */ 249 int used_vr; /* set if process has used altivec */
232#endif /* CONFIG_ALTIVEC */ 250#endif /* CONFIG_ALTIVEC */
@@ -263,13 +281,8 @@ struct thread_struct {
263 * transact_fpr[] is the new set of transactional values. 281 * transact_fpr[] is the new set of transactional values.
264 * VRs work the same way. 282 * VRs work the same way.
265 */ 283 */
266 double transact_fpr[32][TS_FPRWIDTH]; 284 struct thread_fp_state transact_fp;
267 struct { 285 struct thread_vr_state transact_vr;
268 unsigned int pad;
269 unsigned int val; /* Floating point status */
270 } transact_fpscr;
271 vector128 transact_vr[32] __attribute__((aligned(16)));
272 vector128 transact_vscr __attribute__((aligned(16)));
273 unsigned long transact_vrsave; 286 unsigned long transact_vrsave;
274#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 287#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
275#ifdef CONFIG_KVM_BOOK3S_32_HANDLER 288#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -321,11 +334,8 @@ struct thread_struct {
321#else 334#else
322#define INIT_THREAD { \ 335#define INIT_THREAD { \
323 .ksp = INIT_SP, \ 336 .ksp = INIT_SP, \
324 .ksp_limit = INIT_SP_LIMIT, \
325 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 337 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
326 .fs = KERNEL_DS, \ 338 .fs = KERNEL_DS, \
327 .fpr = {{0}}, \
328 .fpscr = { .val = 0, }, \
329 .fpexc_mode = 0, \ 339 .fpexc_mode = 0, \
330 .ppr = INIT_PPR, \ 340 .ppr = INIT_PPR, \
331} 341}
@@ -363,6 +373,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
363extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); 373extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
364extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); 374extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
365 375
376extern void load_fp_state(struct thread_fp_state *fp);
377extern void store_fp_state(struct thread_fp_state *fp);
378extern void load_vr_state(struct thread_vr_state *vr);
379extern void store_vr_state(struct thread_vr_state *vr);
380
366static inline unsigned int __unpack_fe01(unsigned long msr_bits) 381static inline unsigned int __unpack_fe01(unsigned long msr_bits)
367{ 382{
368 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); 383 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
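
After this refactor the FP/VSX and AltiVec state live in free-standing thread_fp_state/thread_vr_state structures (so KVM can point fp_save_area/vr_save_area at its own copies), and TS_FPR() hides the endian-dependent column order inside the fpr array. A sketch of the accessor after the change:

    /* Sketch: FPR 3 of the current task; TS_FPROFFSET is 0 on BE
     * and 1 on LE VSX layouts.
     */
    u64 fpr3 = current->thread.TS_FPR(3);
    /* expands to current->thread.fp_state.fpr[3][TS_FPROFFSET] */
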
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7d0c7f3a7171..d977b9b78696 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -1,4 +1,3 @@
1#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
2#ifndef _POWERPC_PROM_H 1#ifndef _POWERPC_PROM_H
3#define _POWERPC_PROM_H 2#define _POWERPC_PROM_H
4#ifdef __KERNEL__ 3#ifdef __KERNEL__
@@ -20,21 +19,17 @@
20#include <asm/irq.h> 19#include <asm/irq.h>
21#include <linux/atomic.h> 20#include <linux/atomic.h>
22 21
23#define HAVE_ARCH_DEVTREE_FIXUPS 22/* These includes should be removed once implicit includes are cleaned up. */
23#include <linux/of.h>
24#include <linux/of_fdt.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/platform_device.h>
24 28
25/* 29/*
 26 * OF address retrieval & translation 30 * OF address retrieval & translation
27 */ 31 */
28 32
29/* Translate a DMA address from device space to CPU space */
30extern u64 of_translate_dma_address(struct device_node *dev,
31 const __be32 *in_addr);
32
33#ifdef CONFIG_PCI
34extern unsigned long pci_address_to_pio(phys_addr_t address);
35#define pci_address_to_pio pci_address_to_pio
36#endif /* CONFIG_PCI */
37
38/* Parse the ibm,dma-window property of an OF node into the busno, phys and 33/* Parse the ibm,dma-window property of an OF node into the busno, phys and
39 * size parameters. 34 * size parameters.
40 */ 35 */
@@ -44,16 +39,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
44 39
45extern void kdump_move_device_tree(void); 40extern void kdump_move_device_tree(void);
46 41
47/* cache lookup */
48struct device_node *of_find_next_cache_node(struct device_node *np);
49
50#ifdef CONFIG_NUMA
51extern int of_node_to_nid(struct device_node *device);
52#else
53static inline int of_node_to_nid(struct device_node *device) { return 0; }
54#endif
55#define of_node_to_nid of_node_to_nid
56
57extern void of_instantiate_rtc(void); 42extern void of_instantiate_rtc(void);
58 43
59extern int of_get_ibm_chip_id(struct device_node *np); 44extern int of_get_ibm_chip_id(struct device_node *np);
@@ -143,14 +128,5 @@ struct of_drconf_cell {
143 */ 128 */
144extern unsigned char ibm_architecture_vec[]; 129extern unsigned char ibm_architecture_vec[];
145 130
146/* These includes are put at the bottom because they may contain things
147 * that are overridden by this file. Ideally they shouldn't be included
148 * by this file, but there are a bunch of .c files that currently depend
149 * on it. Eventually they will be cleaned up. */
150#include <linux/of_fdt.h>
151#include <linux/of_address.h>
152#include <linux/of_irq.h>
153#include <linux/platform_device.h>
154
155#endif /* __KERNEL__ */ 131#endif /* __KERNEL__ */
156#endif /* _POWERPC_PROM_H */ 132#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 0156702ba24e..576ad88104cb 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
40#define _PAGE_U1 0x010000 40#define _PAGE_U1 0x010000
41#define _PAGE_U0 0x020000 41#define _PAGE_U0 0x020000
42#define _PAGE_ACCESSED 0x040000 42#define _PAGE_ACCESSED 0x040000
43#define _PAGE_LENDIAN 0x080000 43#define _PAGE_ENDIAN 0x080000
44#define _PAGE_GUARDED 0x100000 44#define _PAGE_GUARDED 0x100000
45#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ 45#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
46#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ 46#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 10d1ef016bf1..5c45787d551e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -115,7 +115,12 @@
115#define MSR_64BIT MSR_SF 115#define MSR_64BIT MSR_SF
116 116
117/* Server variant */ 117/* Server variant */
118#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) 118#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
119#ifdef __BIG_ENDIAN__
120#define MSR_ __MSR
121#else
122#define MSR_ (__MSR | MSR_LE)
123#endif
119#define MSR_KERNEL (MSR_ | MSR_64BIT) 124#define MSR_KERNEL (MSR_ | MSR_64BIT)
120#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) 125#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
121#define MSR_USER64 (MSR_USER32 | MSR_64BIT) 126#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
@@ -243,6 +248,7 @@
243#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 248#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
244#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 249#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
245#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ 250#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
251#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
246#define SPRN_SPURR 0x134 /* Scaled PURR */ 252#define SPRN_SPURR 0x134 /* Scaled PURR */
247#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ 253#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
248#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ 254#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
@@ -283,6 +289,7 @@
283#define LPCR_ISL (1ul << (63-2)) 289#define LPCR_ISL (1ul << (63-2))
284#define LPCR_VC_SH (63-2) 290#define LPCR_VC_SH (63-2)
285#define LPCR_DPFD_SH (63-11) 291#define LPCR_DPFD_SH (63-11)
292#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
286#define LPCR_VRMASD (0x1ful << (63-16)) 293#define LPCR_VRMASD (0x1ful << (63-16))
287#define LPCR_VRMA_L (1ul << (63-12)) 294#define LPCR_VRMA_L (1ul << (63-12))
288#define LPCR_VRMA_LP0 (1ul << (63-15)) 295#define LPCR_VRMA_LP0 (1ul << (63-15))
@@ -299,6 +306,7 @@
299#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ 306#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
300#define LPCR_MER 0x00000800 /* Mediated External Exception */ 307#define LPCR_MER 0x00000800 /* Mediated External Exception */
301#define LPCR_MER_SH 11 308#define LPCR_MER_SH 11
309#define LPCR_TC 0x00000200 /* Translation control */
302#define LPCR_LPES 0x0000000c 310#define LPCR_LPES 0x0000000c
303#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ 311#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
304#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ 312#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -311,6 +319,10 @@
311#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ 319#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
312#define SPRN_HMER 0x150 /* Hardware m? error recovery */ 320#define SPRN_HMER 0x150 /* Hardware m? error recovery */
313#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ 321#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
322#define SPRN_PCR 0x152 /* Processor compatibility register */
323#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
324#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
325#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
314#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ 326#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
315#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ 327#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
316#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ 328#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
@@ -420,6 +432,7 @@
420#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ 432#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
421#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ 433#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
422#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ 434#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
435#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
423#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ 436#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
424#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ 437#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
425#define HID4_LPID1_SH 0 /* partition ID top 2 bits */ 438#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
@@ -1102,6 +1115,13 @@
1102#define PVR_BE 0x0070 1115#define PVR_BE 0x0070
1103#define PVR_PA6T 0x0090 1116#define PVR_PA6T 0x0090
1104 1117
1118/* "Logical" PVR values defined in PAPR, representing architecture levels */
1119#define PVR_ARCH_204 0x0f000001
1120#define PVR_ARCH_205 0x0f000002
1121#define PVR_ARCH_206 0x0f000003
1122#define PVR_ARCH_206p 0x0f100003
1123#define PVR_ARCH_207 0x0f000004
1124
1105/* Macros for setting and retrieving special purpose registers */ 1125/* Macros for setting and retrieving special purpose registers */
1106#ifndef __ASSEMBLY__ 1126#ifndef __ASSEMBLY__
1107#define mfmsr() ({unsigned long rval; \ 1127#define mfmsr() ({unsigned long rval; \
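
The logical PVR values pair with the new SPRN_PCR definitions: a hypervisor that sees a guest request, say, PVR_ARCH_205 can set PCR_ARCH_205 so the core behaves like an architecture 2.05 machine. A sketch, simplified from what the HV compatibility code does:

    /* Sketch: map a guest's logical PVR to a PCR value. */
    static unsigned long pcr_for_logical_pvr(u32 pvr)
    {
            if (pvr == PVR_ARCH_205)
                    return PCR_ARCH_205;
            return 0;       /* run in native mode */
    }
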
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index ed8f836da094..2e31aacd8acc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -381,7 +381,7 @@
381#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ 381#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
382#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ 382#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
383 383
384#define dbcr_iac_range(task) ((task)->thread.dbcr0) 384#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0)
385#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ 385#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */
386#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ 386#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */
387#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ 387#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */
@@ -395,7 +395,7 @@
395#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ 395#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */
396#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ 396#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */
397 397
398#define dbcr_dac(task) ((task)->thread.dbcr1) 398#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
399#define DBCR_DAC1R DBCR1_DAC1R 399#define DBCR_DAC1R DBCR1_DAC1R
400#define DBCR_DAC1W DBCR1_DAC1W 400#define DBCR_DAC1W DBCR1_DAC1W
401#define DBCR_DAC2R DBCR1_DAC2R 401#define DBCR_DAC2R DBCR1_DAC2R
@@ -441,7 +441,7 @@
441#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ 441#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
442#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ 442#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
443 443
444#define dbcr_dac(task) ((task)->thread.dbcr0) 444#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
445#define DBCR_DAC1R DBCR0_DAC1R 445#define DBCR_DAC1R DBCR0_DAC1R
446#define DBCR_DAC1W DBCR0_DAC1W 446#define DBCR_DAC1W DBCR0_DAC1W
447#define DBCR_DAC2R DBCR0_DAC2R 447#define DBCR_DAC2R DBCR0_DAC2R
@@ -475,7 +475,7 @@
475#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ 475#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
476#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ 476#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
477 477
478#define dbcr_iac_range(task) ((task)->thread.dbcr1) 478#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
479#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */ 479#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */
480#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */ 480#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */
481#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */ 481#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */
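
These dbcr_* accessors now reach through the debug_reg sub-structure that processor.h split out of thread_struct, so all BookE debug state moves together. For instance:

    /* Sketch: resolves to current->thread.debug.dbcr0 (or dbcr1,
     * depending on the CPU family's layout above).
     */
    unsigned long range = dbcr_iac_range(current);
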
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
index 0cabfd7bc2d1..f5cde45b1161 100644
--- a/arch/powerpc/include/asm/scom.h
+++ b/arch/powerpc/include/asm/scom.h
@@ -54,8 +54,8 @@ struct scom_controller {
54 scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); 54 scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
55 void (*unmap)(scom_map_t map); 55 void (*unmap)(scom_map_t map);
56 56
57 u64 (*read)(scom_map_t map, u32 reg); 57 int (*read)(scom_map_t map, u64 reg, u64 *value);
58 void (*write)(scom_map_t map, u32 reg, u64 value); 58 int (*write)(scom_map_t map, u64 reg, u64 value);
59}; 59};
60 60
61extern const struct scom_controller *scom_controller; 61extern const struct scom_controller *scom_controller;
@@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map)
133 * scom_read - Read a SCOM register 133 * scom_read - Read a SCOM register
134 * @map: Result of scom_map 134 * @map: Result of scom_map
135 * @reg: Register index within that map 135 * @reg: Register index within that map
136 * @value: Updated with the value read
137 *
138 * Returns 0 (success) or a negative error code
136 */ 139 */
137static inline u64 scom_read(scom_map_t map, u32 reg) 140static inline int scom_read(scom_map_t map, u64 reg, u64 *value)
138{ 141{
139 return scom_controller->read(map, reg); 142 int rc;
143
144 rc = scom_controller->read(map, reg, value);
145 if (rc)
146 *value = 0xfffffffffffffffful;
147 return rc;
140} 148}
141 149
142/** 150/**
@@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg)
144 * @map: Result of scom_map 152 * @map: Result of scom_map
145 * @reg: Register index within that map 153 * @reg: Register index within that map
146 * @value: Value to write 154 * @value: Value to write
155 *
156 * Returns 0 (success) or a negative error code
147 */ 157 */
148static inline void scom_write(scom_map_t map, u32 reg, u64 value) 158static inline int scom_write(scom_map_t map, u64 reg, u64 value)
149{ 159{
150 scom_controller->write(map, reg, value); 160 return scom_controller->write(map, reg, value);
151} 161}
152 162
163
153#endif /* CONFIG_PPC_SCOM */ 164#endif /* CONFIG_PPC_SCOM */
154#endif /* __ASSEMBLY__ */ 165#endif /* __ASSEMBLY__ */
155#endif /* __KERNEL__ */ 166#endif /* __KERNEL__ */
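
scom_read() and scom_write() now return a status instead of a bare value, so callers can tell a failed access apart from all-ones data. Usage under the new convention:

    /* Sketch: new SCOM calling convention; map and reg as before. */
    u64 val;
    int rc = scom_read(map, reg, &val);

    if (rc)
            pr_err("SCOM read failed: %d\n", rc);   /* val forced to ~0 */
    else
            rc = scom_write(map, reg, val | 1);
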
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index d3ca85529b8b..703a8412dac2 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -23,6 +23,10 @@ extern void reloc_got2(unsigned long);
23 23
24#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) 24#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
25 25
26void check_for_initrd(void);
27void do_init_bootmem(void);
28void setup_panic(void);
29
26#endif /* !__ASSEMBLY__ */ 30#endif /* !__ASSEMBLY__ */
27 31
28#endif /* _ASM_POWERPC_SETUP_H */ 32#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a0d006..d89beaba26ff 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -125,7 +125,7 @@
125#define FP_EX_DIVZERO (1 << (31 - 5)) 125#define FP_EX_DIVZERO (1 << (31 - 5))
126#define FP_EX_INEXACT (1 << (31 - 6)) 126#define FP_EX_INEXACT (1 << (31 - 6))
127 127
128#define __FPU_FPSCR (current->thread.fpscr.val) 128#define __FPU_FPSCR (current->thread.fp_state.fpscr)
129 129
130/* We only actually write to the destination register 130/* We only actually write to the destination register
131 * if exceptions signalled (if any) will not trap. 131 * if exceptions signalled (if any) will not trap.
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 93f280e23279..37b7ca39ec9f 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
235 235
236/* syscalls implemented in spufs */ 236/* syscalls implemented in spufs */
237struct file; 237struct file;
238struct coredump_params;
238struct spufs_calls { 239struct spufs_calls {
239 long (*create_thread)(const char __user *name, 240 long (*create_thread)(const char __user *name,
240 unsigned int flags, umode_t mode, 241 unsigned int flags, umode_t mode,
@@ -242,7 +243,7 @@ struct spufs_calls {
242 long (*spu_run)(struct file *filp, __u32 __user *unpc, 243 long (*spu_run)(struct file *filp, __u32 __user *unpc,
243 __u32 __user *ustatus); 244 __u32 __user *ustatus);
244 int (*coredump_extra_notes_size)(void); 245 int (*coredump_extra_notes_size)(void);
245 int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset); 246 int (*coredump_extra_notes_write)(struct coredump_params *cprm);
246 void (*notify_spus_active)(void); 247 void (*notify_spus_active)(void);
247 struct module *owner; 248 struct module *owner;
248}; 249};
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index e40010abcaf1..0dffad6bcc84 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -10,7 +10,9 @@
 #define __HAVE_ARCH_STRNCMP
 #define __HAVE_ARCH_STRCAT
 #define __HAVE_ARCH_MEMSET
+#ifdef __BIG_ENDIAN__
 #define __HAVE_ARCH_MEMCPY
+#endif
 #define __HAVE_ARCH_MEMMOVE
 #define __HAVE_ARCH_MEMCMP
 #define __HAVE_ARCH_MEMCHR
@@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *);
 extern int strncmp(const char *, const char *, __kernel_size_t);
 extern char * strcat(char *, const char *);
 extern void * memset(void *,int,__kernel_size_t);
+#ifdef __BIG_ENDIAN__
 extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
 extern void * memmove(void *,const void *,__kernel_size_t);
 extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 2be5618cdec6..9ee12610af02 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
+extern void switch_booke_debug_regs(struct thread_struct *new_thread);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 23016020915e..75c6ecdb8f37 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -37,6 +37,7 @@ typedef ppc_opcode_t uprobe_opcode_t;
 struct arch_uprobe {
 	union {
 		u8 insn[MAX_UINSN_BYTES];
+		u8 ixol[MAX_UINSN_BYTES];
 		u32 ainsn;
 	};
 };
@@ -45,11 +46,4 @@ struct arch_uprobe_task {
 	unsigned long saved_trap_nr;
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 #endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index d0b6d4ac6dda..9a5c928bb3c6 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -8,6 +8,8 @@
 #include <linux/kernel.h>
 #include <asm/asm-compat.h>
 
+#ifdef __BIG_ENDIAN__
+
 struct word_at_a_time {
 	const unsigned long high_bits, low_bits;
 };
@@ -38,4 +40,80 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 	return (val + c->high_bits) & ~rhs;
 }
 
+#else
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+#define create_zero_mask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	unsigned long leading_zero_bits;
+	long trailing_zero_bit_mask;
+
+	asm ("addi	%1,%2,-1\n\t"
+	     "andc	%1,%1,%2\n\t"
+	     "popcntd	%0,%1"
+	     : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+	     : "r" (mask));
+	return leading_zero_bits >> 3;
+}
+
+#else	/* 32-bit case */
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return count_masked_bytes(mask);
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
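
The little-endian has_zero()/find_zero() pair is pure byte arithmetic: subtracting 0x01 from every byte and masking with ~word and 0x80 sets the high bit in each byte that was zero (bytes above the first zero can be false-flagged by the borrow, which is why only the lowest set bit matters on little-endian). A user-space illustration of the same arithmetic; this is not the kernel code itself, and __builtin_ctzll stands in for the addi/andc/popcntd sequence used on PPC64:

    #include <stdio.h>
    #include <stdint.h>

    #define REPEAT_BYTE(x)	((~0ull / 0xff) * (x))

    int main(void)
    {
    	const uint64_t one_bits  = REPEAT_BYTE(0x01);
    	const uint64_t high_bits = REPEAT_BYTE(0x80);
    	/* Little-endian view of the bytes "GFEDCBA\0": the zero byte
    	 * is the eighth one (index 7). */
    	uint64_t word = 0x0041424344454647ull;
    	uint64_t mask = ((word - one_bits) & ~word) & high_bits;

    	if (mask)	/* has_zero() */
    		/* find_zero(): count trailing zero bits, divide by 8 */
    		printf("first zero byte at index %d\n",
    		       __builtin_ctzll(mask) >> 3);
    	return 0;	/* prints: first zero byte at index 7 */
    }
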
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index c82eb12a5b18..0abb97f3be10 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -1 +1,68 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#ifndef _ASM_POWERPC_XOR_H
+#define _ASM_POWERPC_XOR_H
+
+#ifdef CONFIG_ALTIVEC
+
+#include <asm/cputable.h>
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in);
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in);
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in);
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in, unsigned long *v5_in);
+
+static struct xor_block_template xor_block_altivec = {
+	.name = "altivec",
+	.do_2 = xor_altivec_2,
+	.do_3 = xor_altivec_3,
+	.do_4 = xor_altivec_4,
+	.do_5 = xor_altivec_5,
+};
+
+#define XOR_SPEED_ALTIVEC()				\
+	do {						\
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))	\
+			xor_speed(&xor_block_altivec);	\
+	} while (0)
+#else
+#define XOR_SPEED_ALTIVEC()
+#endif
+
+/* Also try the generic routines. */
 #include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES			\
+do {						\
+	xor_speed(&xor_block_8regs);		\
+	xor_speed(&xor_block_8regs_p);		\
+	xor_speed(&xor_block_32regs);		\
+	xor_speed(&xor_block_32regs_p);		\
+	XOR_SPEED_ALTIVEC();			\
+} while (0)
+
+#endif /* _ASM_POWERPC_XOR_H */
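
Each do_N routine XORs N-1 source blocks into the first block. For reference, this is what a do_2 routine computes, written as plain C (a sketch, not the kernel's implementation; the AltiVec versions produce the same result using 128-bit vector loads and stores):

    /* Reference semantics of a do_2 routine: v1 ^= v2, word at a time.
     * `bytes` is a multiple of sizeof(unsigned long) in practice. */
    static void xor_2_reference(unsigned long bytes, unsigned long *v1_in,
    			    unsigned long *v2_in)
    {
    	unsigned long words = bytes / sizeof(unsigned long);

    	while (words--)
    		*v1_in++ ^= *v2_in++;
    }

xor_speed() benchmarks each registered template at boot and the RAID code picks the fastest, which is why the AltiVec template is only offered when CPU_FTR_ALTIVEC is present.
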
diff --git a/arch/powerpc/include/uapi/asm/byteorder.h b/arch/powerpc/include/uapi/asm/byteorder.h
index aa6cc4fac965..ca931d074000 100644
--- a/arch/powerpc/include/uapi/asm/byteorder.h
+++ b/arch/powerpc/include/uapi/asm/byteorder.h
@@ -7,6 +7,10 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
 #include <linux/byteorder/big_endian.h>
+#endif
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
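
With this change a little-endian (ppc64le) build picks up the little-endian byteorder helpers, so the usual conversion macros do the right thing on either endianness. A brief illustration of the effect (a sketch using the kernel's identifiers):

    #include <asm/byteorder.h>

    /* On a __LITTLE_ENDIAN__ build cpu_to_be32() now byte-swaps and
     * cpu_to_le32() is a no-op; on big-endian builds the reverse holds.
     * Either way the big-endian wire bytes come out as 11 22 33 44. */
    static inline __be32 example_wire_word(void)
    {
    	return cpu_to_be32(0x11223344);
    }
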
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e9ff90..6836ec79a830 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
 	__u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE		0x0
+#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 struct kvm_debug_exit_arch {
+	__u64 address;
+	/*
+	 * exiting to userspace because of h/w breakpoint, watchpoint
+	 * (read, write or both) and software breakpoint.
+	 */
+	__u32 status;
+	__u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
 		 * Type denotes h/w breakpoint, read watchpoint, write
 		 * watchpoint or watchpoint (both read and write).
 		 */
-#define KVMPPC_DEBUG_NONE		0x0
-#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 		__u32 type;
 		__u32 reserved;
 	} bp[16];
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
 
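
The relocated KVMPPC_DEBUG_* flags now do double duty: a debugger sets them as the "type" via the KVM_SET_GUEST_DEBUG ioctl, and the kernel reports them back as the "status" in kvm_debug_exit_arch on a KVM_EXIT_DEBUG. A minimal user-space sketch of arming one hardware breakpoint, assuming field names follow this header and <linux/kvm.h> (error handling elided):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Arm a single h/w breakpoint on an already-created vcpu fd. */
    static int set_hw_breakpoint(int vcpu_fd, __u64 guest_addr)
    {
    	struct kvm_guest_debug dbg = {
    		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
    	};

    	dbg.arch.bp[0].addr = guest_addr;
    	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

    	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }

When the guest hits the breakpoint, the run loop sees KVM_EXIT_DEBUG and kvm_run->debug.arch carries the faulting address and a status of KVMPPC_DEBUG_BREAKPOINT.
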
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a6d74467c9ed..fa698324a1fd 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
 
 #define SO_BUSY_POLL		46
 
+#define SO_MAX_PACING_RATE	47
+
 #endif /* _ASM_POWERPC_SOCKET_H */
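
SO_MAX_PACING_RATE caps a socket's transmit rate, with the value given in bytes per second and the pacing enforced by the fq packet scheduler. A hedged usage sketch, assuming a connected TCP socket fd:

    #include <sys/socket.h>

    /* Limit transmissions on `fd` to roughly 1 MB/s. */
    static int cap_pacing_rate(int fd)
    {
    	unsigned int rate = 1000000;	/* bytes per second */

    	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
    			  &rate, sizeof(rate));
    }
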