Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/bitops.h            5
-rw-r--r--  arch/powerpc/include/asm/cache.h            14
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h           1
-rw-r--r--  arch/powerpc/include/asm/code-patching.h     7
-rw-r--r--  arch/powerpc/include/asm/cputable.h         12
-rw-r--r--  arch/powerpc/include/asm/eeh.h              14
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h    21
-rw-r--r--  arch/powerpc/include/asm/fsl_lbc.h           2
-rw-r--r--  arch/powerpc/include/asm/hardirq.h           3
-rw-r--r--  arch/powerpc/include/asm/io.h               16
-rw-r--r--  arch/powerpc/include/asm/iommu.h            54
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h           1
-rw-r--r--  arch/powerpc/include/asm/lppaca.h            2
-rw-r--r--  arch/powerpc/include/asm/mce.h             197
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h       13
-rw-r--r--  arch/powerpc/include/asm/mmu.h              21
-rw-r--r--  arch/powerpc/include/asm/opal.h            108
-rw-r--r--  arch/powerpc/include/asm/paca.h             16
-rw-r--r--  arch/powerpc/include/asm/pgtable.h          66
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h          14
-rw-r--r--  arch/powerpc/include/asm/processor.h         8
-rw-r--r--  arch/powerpc/include/asm/ps3.h               1
-rw-r--r--  arch/powerpc/include/asm/pte-hash64.h        8
-rw-r--r--  arch/powerpc/include/asm/reg.h               2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h        10
-rw-r--r--  arch/powerpc/include/asm/spinlock.h         12
-rw-r--r--  arch/powerpc/include/asm/thread_info.h       9
-rw-r--r--  arch/powerpc/include/asm/tm.h                1
-rw-r--r--  arch/powerpc/include/asm/topology.h         10
-rw-r--r--  arch/powerpc/include/asm/vio.h               1
30 files changed, 597 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 910194e9a1e2..a5e9a7d494d8 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -46,6 +46,11 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+
 /*
  * clear_bit doesn't imply a memory barrier
  */
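
The PPC_BIT*() helpers added above use IBM bit numbering, where bit 0 is the most significant bit of a 64-bit word; the SRR1/DSISR masks in the new asm/mce.h further down are built from them. A minimal user-space sketch of the arithmetic (not part of the patch; the macros are re-declared locally and BITS_PER_LONG is assumed to be 64):

    /* Illustration only -- not part of the patch. */
    #include <stdio.h>

    #define BITS_PER_LONG		64
    #define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
    #define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
    #define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

    int main(void)
    {
    	/* IBM bit 0 is the most significant bit of the 64-bit word. */
    	printf("PPC_BIT(0)         = 0x%016lx\n", PPC_BIT(0));   /* 0x8000000000000000 */
    	printf("PPC_BIT(63)        = 0x%016lx\n", PPC_BIT(63));  /* 0x0000000000000001 */
    	/* Mask covering IBM bits 43..45, as used by P7_SRR1_MC_IFETCH() in mce.h. */
    	printf("PPC_BITMASK(43,45) = 0x%016lx\n", PPC_BITMASK(43, 45));
    	return 0;
    }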
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 9e495c9a6a88..ed0afc1e44a4 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -41,8 +41,20 @@ struct ppc64_caches {
 extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
-#if !defined(__ASSEMBLY__)
+#if defined(__ASSEMBLY__)
+/*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+#define PURGE_PREFETCHED_INS	\
+	sync;			\
+	icbi	0,r3;		\
+	sync;			\
+	isync
 
+#else
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #ifdef CONFIG_6xx
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index e245aab7f191..d463c68fe7f0 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
   })
+#define cmpxchg64_relaxed	cmpxchg64_local
 #else
 #include <asm-generic/cmpxchg-local.h>
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a6f8c7a5cbb7..97e02f985df8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -34,6 +34,13 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
 unsigned long branch_target(const unsigned int *instr);
 unsigned int translate_branch(const unsigned int *dest,
 			      const unsigned int *src);
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+	extern unsigned int name; \
+	__patch_exception((exc), (unsigned long)&name); \
+} while (0)
+#endif
 
 static inline unsigned long ppc_function_entry(void *func)
 {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 0d4939ba48e7..617cc767c076 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -90,6 +90,18 @@ struct cpu_spec {
  * if the error is fatal, 1 if it was fully recovered and 0 to
  * pass up (not CPU originated) */
 	int		(*machine_check)(struct pt_regs *regs);
+
+	/*
+	 * Processor specific early machine check handler which is
+	 * called in real mode to handle SLB and TLB errors.
+	 */
+	long		(*machine_check_early)(struct pt_regs *regs);
+
+	/*
+	 * Processor specific routine to flush tlbs.
+	 */
+	void		(*flush_tlb)(unsigned long inval_selector);
+
 };
 
 extern struct cpu_spec *cur_cpu_spec;
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d3e5e9bc8f94..9e39ceb1d19f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -90,7 +90,8 @@ struct eeh_pe {
 #define EEH_DEV_IRQ_DISABLED	(1 << 3)	/* Interrupt disabled	*/
 #define EEH_DEV_DISCONNECTED	(1 << 4)	/* Removing from PE	*/
 
-#define EEH_DEV_SYSFS		(1 << 8)	/* Sysfs created	*/
+#define EEH_DEV_NO_HANDLER	(1 << 8)	/* No error handler	*/
+#define EEH_DEV_SYSFS		(1 << 9)	/* Sysfs created	*/
 
 struct eeh_dev {
 	int mode;			/* EEH mode		*/
@@ -117,6 +118,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
 	return edev ? edev->pdev : NULL;
 }
 
+/* Return values from eeh_ops::next_error */
+enum {
+	EEH_NEXT_ERR_NONE = 0,
+	EEH_NEXT_ERR_INF,
+	EEH_NEXT_ERR_FROZEN_PE,
+	EEH_NEXT_ERR_FENCED_PHB,
+	EEH_NEXT_ERR_DEAD_PHB,
+	EEH_NEXT_ERR_DEAD_IOC
+};
+
 /*
  * The struct is used to trace the registered EEH operation
  * callback functions. Actually, those operation callback
@@ -157,6 +168,7 @@ struct eeh_ops {
 	int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
 	int (*write_config)(struct device_node *dn, int where, int size, u32 val);
 	int (*next_error)(struct eeh_pe **pe);
+	int (*restore_config)(struct device_node *dn);
 };
 
 extern struct eeh_ops *eeh_ops;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 243ce69ad685..66830618cc19 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -301,9 +301,12 @@ do_kvm_##n: \
 	beq	4f;			/* if from kernel mode		*/ \
 	ACCOUNT_CPU_USER_ENTRY(r9, r10);				   \
 	SAVE_PPR(area, r9, r10);					   \
-4:	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
-	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
-	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
+4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
+	EXCEPTION_PROLOG_COMMON_3(n)					   \
+	ACCOUNT_STOLEN_TIME
+
+/* Save original regs values from save area to stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area)					   \
 	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
 	ld	r10,area+EX_R10(r13);					   \
 	std	r9,GPR9(r1);						   \
@@ -318,11 +321,16 @@ do_kvm_##n: \
 	ld	r10,area+EX_CFAR(r13);					   \
 	std	r10,ORIG_GPR3(r1);					   \
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	   \
+	GET_CTR(r10, area);						   \
+	std	r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(n)					   \
+	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
+	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
+	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
 	mflr	r9;			/* Get LR, later save to stack	*/ \
 	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
 	std	r9,_LINK(r1);						   \
-	GET_CTR(r10, area);						   \
-	std	r10,_CTR(r1);						   \
 	lbz	r10,PACASOFTIRQEN(r13);					   \
 	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
 	std	r10,SOFTE(r1);						   \
@@ -332,8 +340,7 @@ do_kvm_##n: \
 	li	r10,0;							   \
 	ld	r11,exception_marker@toc(r2);				   \
 	std	r10,RESULT(r1);		/* clear regs->result		*/ \
-	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/ \
-	ACCOUNT_STOLEN_TIME
+	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
 
 /*
  * Exception vectors.
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 420b45368fcf..067fb0dca549 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -285,7 +285,7 @@ struct fsl_lbc_ctrl {
 	/* device info */
 	struct device *dev;
 	struct fsl_lbc_regs __iomem *regs;
-	int irq;
+	int irq[2];
 	wait_queue_head_t irq_wait;
 	spinlock_t lock;
 	void *nand;
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3bdcfce2c42a..418fb654370d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,7 +6,8 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-	unsigned int timer_irqs;
+	unsigned int timer_irqs_event;
+	unsigned int timer_irqs_others;
 	unsigned int pmu_irqs;
 	unsigned int mce_exceptions;
 	unsigned int spurious_irqs;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 575fbf81fad0..97d3869991ca 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -191,8 +191,24 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);
 
 #endif /* __BIG_ENDIAN */
 
+/*
+ * Cache inhibitied accessors for use in real mode, you don't want to use these
+ * unless you know what you're doing.
+ *
+ * NB. These use the cpu byte ordering.
+ */
+DEF_MMIO_OUT_X(out_rm8,   8, stbcix);
+DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
+DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
+DEF_MMIO_IN_X(in_rm8,   8, lbzcix);
+DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
+DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
+
 #ifdef __powerpc64__
 
+DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
+DEF_MMIO_IN_X(in_rm64, 64, ldcix);
+
 #ifdef __BIG_ENDIAN__
 DEF_MMIO_OUT_D(out_be64, 64, std);
 DEF_MMIO_IN_D(in_be64, 64, ld);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index c34656a8925e..f7a8036579b5 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,22 +30,19 @@
 #include <asm/machdep.h>
 #include <asm/types.h>
 
-#define IOMMU_PAGE_SHIFT      12
-#define IOMMU_PAGE_SIZE       (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K      12
+#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
 
 /* Boot time flags */
 extern int iommu_is_off;
 extern int iommu_force_on;
 
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
-	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
-}
-
-
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
@@ -76,11 +73,20 @@ struct iommu_table {
 	struct iommu_pool large_pool;
 	struct iommu_pool pools[IOMMU_NR_POOLS];
 	unsigned long *it_map;       /* A simple allocation bitmap for now */
+	unsigned long  it_page_shift;/* table iommu page size */
 #ifdef CONFIG_IOMMU_API
 	struct iommu_group *it_group;
 #endif
 };
 
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
 struct scatterlist;
 
 static inline void set_iommu_table_base(struct device *dev, void *base)
@@ -101,8 +107,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
  */
 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
 					    int nid);
+#ifdef CONFIG_IOMMU_API
 extern void iommu_register_group(struct iommu_table *tbl,
 				 int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct device *dev);
+extern void iommu_del_device(struct device *dev);
+#else
+static inline void iommu_register_group(struct iommu_table *tbl,
+					int pci_domain_number,
+					unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct device *dev)
+{
+	return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+#endif /* !CONFIG_IOMMU_API */
+
+static inline void set_iommu_table_base_and_group(struct device *dev,
+						  void *base)
+{
+	set_iommu_table_base(dev, base);
+	iommu_add_device(dev);
+}
 
 extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			struct scatterlist *sglist, int nelems,
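
With this change the IOMMU page size is carried per table in it_page_shift, and the page-size helpers take the table pointer instead of assuming 4K. A standalone sketch of the same arithmetic (not part of the patch; iommu_table and __ilog2() are local stand-ins for illustration):

    /* Illustration only -- not part of the patch. */
    #include <stdio.h>

    struct iommu_table { unsigned long it_page_shift; };

    static int __ilog2(unsigned long x) { return 63 - __builtin_clzl(x); }

    #define IOMMU_PAGE_SIZE(tblptr)	(1UL << (tblptr)->it_page_shift)

    static int get_iommu_order(unsigned long size, struct iommu_table *tbl)
    {
    	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
    }

    int main(void)
    {
    	struct iommu_table tbl4k = { 12 }, tbl64k = { 16 };

    	/* A 128KB mapping is order 5 with 4K IOMMU pages, order 1 with 64K pages. */
    	printf("4K  table: page size %lu, order %d\n",
    	       IOMMU_PAGE_SIZE(&tbl4k), get_iommu_order(128 * 1024, &tbl4k));
    	printf("64K table: page size %lu, order %d\n",
    	       IOMMU_PAGE_SIZE(&tbl64k), get_iommu_order(128 * 1024, &tbl64k));
    	return 0;
    }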
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1bd92fd43cfb..1503d8c7c41b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -74,6 +74,7 @@
 #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
 #define BOOKE_INTERRUPT_HV_SYSCALL 40
 #define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
 
 /* book3s */
 
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 844c28de7ec0..d0a2a2f99564 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -132,8 +132,6 @@ struct slb_shadow {
 	} save_area[SLB_NUM_BOLTED];
 } ____cacheline_aligned;
 
-extern struct slb_shadow slb_shadow[];
-
 /*
  * Layout of entries in the hypervisor's dispatch trace log buffer.
  */
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 000000000000..8e99edf6d966
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,197 @@
+/*
+ * Machine check exception header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+/*
+ * Machine Check bits on power7 and power8
+ */
+#define P7_SRR1_MC_LOADSTORE(srr1)	((srr1) & PPC_BIT(42))	/* P8 too */
+
+/* SRR1 bits for machine check (On Power7 and Power8) */
+#define P7_SRR1_MC_IFETCH(srr1)	((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
+
+#define P7_SRR1_MC_IFETCH_UE		(0x1 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_PARITY	(0x2 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT	(0x3 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_BOTH	(0x4 << PPC_BITLSHIFT(45))
+#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT	(0x5 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD	(0x6 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL	(0x7 << PPC_BITLSHIFT(45))
+
+/* SRR1 bits for machine check (On Power8) */
+#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT	(0x4 << PPC_BITLSHIFT(45))
+
+/* DSISR bits for machine check (On Power7 and Power8) */
+#define P7_DSISR_MC_UE			(PPC_BIT(48))	/* P8 too */
+#define P7_DSISR_MC_UE_TABLEWALK	(PPC_BIT(49))	/* P8 too */
+#define P7_DSISR_MC_ERAT_MULTIHIT	(PPC_BIT(52))	/* P8 too */
+#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB	(PPC_BIT(53))	/* P8 too */
+#define P7_DSISR_MC_SLB_PARITY_MFSLB	(PPC_BIT(55))	/* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT	(PPC_BIT(56))	/* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT_PARITY	(PPC_BIT(57))	/* P8 too */
+
+/*
+ * DSISR bits for machine check (Power8) in addition to above.
+ * Secondary DERAT Multihit
+ */
+#define P8_DSISR_MC_ERAT_MULTIHIT_SEC	(PPC_BIT(54))
+
+/* SLB error bits */
+#define P7_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_ERAT_MULTIHIT | \
+					 P7_DSISR_MC_SLB_PARITY_MFSLB | \
+					 P7_DSISR_MC_SLB_MULTIHIT | \
+					 P7_DSISR_MC_SLB_MULTIHIT_PARITY)
+
+#define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \
+					 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+enum MCE_Version {
+	MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+	MCE_SEV_NO_ERROR = 0,
+	MCE_SEV_WARNING = 1,
+	MCE_SEV_ERROR_SYNC = 2,
+	MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+	MCE_DISPOSITION_RECOVERED = 0,
+	MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+	MCE_INITIATOR_UNKNOWN = 0,
+	MCE_INITIATOR_CPU = 1,
+};
+
+enum MCE_ErrorType {
+	MCE_ERROR_TYPE_UNKNOWN = 0,
+	MCE_ERROR_TYPE_UE = 1,
+	MCE_ERROR_TYPE_SLB = 2,
+	MCE_ERROR_TYPE_ERAT = 3,
+	MCE_ERROR_TYPE_TLB = 4,
+};
+
+enum MCE_UeErrorType {
+	MCE_UE_ERROR_INDETERMINATE = 0,
+	MCE_UE_ERROR_IFETCH = 1,
+	MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+	MCE_UE_ERROR_LOAD_STORE = 3,
+	MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+	MCE_SLB_ERROR_INDETERMINATE = 0,
+	MCE_SLB_ERROR_PARITY = 1,
+	MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+	MCE_ERAT_ERROR_INDETERMINATE = 0,
+	MCE_ERAT_ERROR_PARITY = 1,
+	MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+	MCE_TLB_ERROR_INDETERMINATE = 0,
+	MCE_TLB_ERROR_PARITY = 1,
+	MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+struct machine_check_event {
+	enum MCE_Version	version:8;	/* 0x00 */
+	uint8_t			in_use;		/* 0x01 */
+	enum MCE_Severity	severity:8;	/* 0x02 */
+	enum MCE_Initiator	initiator:8;	/* 0x03 */
+	enum MCE_ErrorType	error_type:8;	/* 0x04 */
+	enum MCE_Disposition	disposition:8;	/* 0x05 */
+	uint8_t			reserved_1[2];	/* 0x06 */
+	uint64_t		gpr3;		/* 0x08 */
+	uint64_t		srr0;		/* 0x10 */
+	uint64_t		srr1;		/* 0x18 */
+	union {					/* 0x20 */
+		struct {
+			enum MCE_UeErrorType ue_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		physical_address_provided;
+			uint8_t		reserved_1[5];
+			uint64_t	effective_address;
+			uint64_t	physical_address;
+			uint8_t		reserved_2[8];
+		} ue_error;
+
+		struct {
+			enum MCE_SlbErrorType slb_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} slb_error;
+
+		struct {
+			enum MCE_EratErrorType erat_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} erat_error;
+
+		struct {
+			enum MCE_TlbErrorType tlb_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} tlb_error;
+	} u;
+};
+
+struct mce_error_info {
+	enum MCE_ErrorType error_type:8;
+	union {
+		enum MCE_UeErrorType ue_error_type:8;
+		enum MCE_SlbErrorType slb_error_type:8;
+		enum MCE_EratErrorType erat_error_type:8;
+		enum MCE_TlbErrorType tlb_error_type:8;
+	} u;
+	uint8_t		reserved[2];
+};
+
+#define MAX_MC_EVT	100
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE	true
+#define MCE_EVENT_DONTRELEASE	false
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+			   struct mce_error_info *mce_err, uint64_t addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt);
+extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
+
+#endif /* __ASM_PPC64_MCE_H__ */
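
The new machine_check_event is a tagged union: error_type selects which member of u is valid, and the *_provided flags say whether the address fields were filled in. The sketch below (not part of the patch) uses a trimmed stand-in structure to show the decode pattern a consumer such as machine_check_print_event_info() would follow:

    /* Illustration only -- trimmed stand-in, not the real structure. */
    #include <stdint.h>
    #include <stdio.h>

    enum MCE_ErrorType { MCE_ERROR_TYPE_UNKNOWN = 0, MCE_ERROR_TYPE_UE = 1, MCE_ERROR_TYPE_SLB = 2 };

    struct mce_event_lite {
    	enum MCE_ErrorType error_type:8;
    	uint64_t srr0;		/* interrupted NIP */
    	union {
    		struct { uint8_t effective_address_provided; uint64_t effective_address; } slb_error;
    		struct { uint8_t physical_address_provided;  uint64_t physical_address;  } ue_error;
    	} u;
    };

    static void print_event(const struct mce_event_lite *evt)
    {
    	switch (evt->error_type) {
    	case MCE_ERROR_TYPE_SLB:
    		if (evt->u.slb_error.effective_address_provided)
    			printf("SLB error at ea 0x%016llx (nip 0x%016llx)\n",
    			       (unsigned long long)evt->u.slb_error.effective_address,
    			       (unsigned long long)evt->srr0);
    		break;
    	case MCE_ERROR_TYPE_UE:
    		printf("Uncorrected error, pa%s known\n",
    		       evt->u.ue_error.physical_address_provided ? "" : " not");
    		break;
    	default:
    		printf("Unknown machine check\n");
    	}
    }

    int main(void)
    {
    	struct mce_event_lite evt = { .error_type = MCE_ERROR_TYPE_SLB, .srr0 = 0xc000000000001234ULL };
    	evt.u.slb_error.effective_address_provided = 1;
    	evt.u.slb_error.effective_address = 0xc000000012345678ULL;
    	print_event(&evt);
    	return 0;
    }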
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 936db360790a..89b785d16846 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
+struct tlb_core_data {
+	/* For software way selection, as on Freescale TLB1 */
+	u8 esel_next, esel_max, esel_first;
+
+	/* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
+	u8 lock;
+};
+
 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE	0
+#define PPC_HTW_IBM	1
+#define PPC_HTW_E6500	2
 
 /*
  * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 691fd8aca939..f8d1d6dcf7db 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
 #define MMU_PAGE_1M	5
-#define MMU_PAGE_4M	6
-#define MMU_PAGE_8M	7
-#define MMU_PAGE_16M	8
-#define MMU_PAGE_64M	9
-#define MMU_PAGE_256M	10
-#define MMU_PAGE_1G	11
-#define MMU_PAGE_16G	12
-#define MMU_PAGE_64G	13
-
-#define MMU_PAGE_COUNT	14
+#define MMU_PAGE_2M	6
+#define MMU_PAGE_4M	7
+#define MMU_PAGE_8M	8
+#define MMU_PAGE_16M	9
+#define MMU_PAGE_64M	10
+#define MMU_PAGE_256M	11
+#define MMU_PAGE_1G	12
+#define MMU_PAGE_16G	13
+#define MMU_PAGE_64G	14
+
+#define MMU_PAGE_COUNT	15
 
 #if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 7bdcf340016c..40157e2ca691 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -33,6 +33,28 @@ struct opal_takeover_args {
 	u64	rd_loc;			/* r11 */
 };
 
+/*
+ * SG entry
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned *and* each block
+ * size except the last one in the list to be as well.
+ */
+struct opal_sg_entry {
+	void	*data;
+	long	length;
+};
+
+/* sg list */
+struct opal_sg_list {
+	unsigned long num_entries;
+	struct opal_sg_list *next;
+	struct opal_sg_entry entry[];
+};
+
+/* We calculate number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
 extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
 
 extern long opal_do_takeover(struct opal_takeover_args *args);
@@ -132,6 +154,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
 #define OPAL_FLASH_VALIDATE			76
 #define OPAL_FLASH_MANAGE			77
 #define OPAL_FLASH_UPDATE			78
+#define OPAL_GET_MSG				85
+#define OPAL_CHECK_ASYNC_COMPLETION		86
+#define OPAL_SYNC_HOST_REBOOT			87
 
 #ifndef __ASSEMBLY__
 
@@ -211,7 +236,16 @@ enum OpalPendingState {
 	OPAL_EVENT_ERROR_LOG	= 0x40,
 	OPAL_EVENT_EPOW		= 0x80,
 	OPAL_EVENT_LED_STATUS	= 0x100,
-	OPAL_EVENT_PCI_ERROR	= 0x200
+	OPAL_EVENT_PCI_ERROR	= 0x200,
+	OPAL_EVENT_MSG_PENDING	= 0x800,
+};
+
+enum OpalMessageType {
+	OPAL_MSG_ASYNC_COMP = 0,
+	OPAL_MSG_MEM_ERR,
+	OPAL_MSG_EPOW,
+	OPAL_MSG_SHUTDOWN,
+	OPAL_MSG_TYPE_MAX,
 };
 
 /* Machine check related definitions */
@@ -311,12 +345,16 @@ enum OpalMveEnableAction {
 	OPAL_ENABLE_MVE = 1
 };
 
-enum OpalPciResetAndReinitScope {
+enum OpalPciResetScope {
 	OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
 	OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
 	OPAL_PCI_IODA_TABLE_RESET = 6,
 };
 
+enum OpalPciReinitScope {
+	OPAL_REINIT_PCI_DEV = 1000
+};
+
 enum OpalPciResetState {
 	OPAL_DEASSERT_RESET = 0,
 	OPAL_ASSERT_RESET = 1
@@ -356,6 +394,12 @@ enum OpalLPCAddressType {
 	OPAL_LPC_FW = 2,
 };
 
+struct opal_msg {
+	uint32_t msg_type;
+	uint32_t reserved;
+	uint64_t params[8];
+};
+
 struct opal_machine_check_event {
 	enum OpalMCE_Version	version:8;	/* 0x00 */
 	uint8_t			in_use;		/* 0x01 */
@@ -404,6 +448,58 @@ struct opal_machine_check_event {
 	} u;
 };
 
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+	OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+	OPAL_MEM_ERR_TYPE_RESILIENCE	= 0,
+	OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+	OPAL_MEM_ERR_TYPE_SCRUB,
+};
+
+/* Memory Reilience error type */
+enum OpalMemErr_ResilErrType {
+	OPAL_MEM_RESILIENCE_CE		= 0,
+	OPAL_MEM_RESILIENCE_UE,
+	OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+	OPAL_MEM_DYNAMIC_DEALLOC	= 0,
+};
+
+/* OpalMemoryErrorData->flags */
+#define OPAL_MEM_CORRECTED_ERROR	0x0001
+#define OPAL_MEM_THRESHOLD_EXCEEDED	0x0002
+#define OPAL_MEM_ACK_REQUIRED		0x8000
+
+struct OpalMemoryErrorData {
+	enum OpalMemErr_Version	version:8;	/* 0x00 */
+	enum OpalMemErrType	type:8;		/* 0x01 */
+	uint16_t		flags;		/* 0x02 */
+	uint8_t			reserved_1[4];	/* 0x04 */
+
+	union {
+		/* Memory Resilience corrected/uncorrected error info */
+		struct {
+			enum OpalMemErr_ResilErrType resil_err_type:8;
+			uint8_t			reserved_1[7];
+			uint64_t		physical_address_start;
+			uint64_t		physical_address_end;
+		} resilience;
+		/* Dynamic memory deallocation error info */
+		struct {
+			enum OpalMemErr_DynErrType dyn_err_type:8;
+			uint8_t			reserved_1[7];
+			uint64_t		physical_address_start;
+			uint64_t		physical_address_end;
+		} dyn_dealloc;
+	} u;
+};
+
 enum {
 	OPAL_P7IOC_DIAG_TYPE_NONE	= 0,
 	OPAL_P7IOC_DIAG_TYPE_RGC	= 1,
@@ -710,7 +806,7 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
 int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
 				    uint64_t diag_buffer_len);
 int64_t opal_pci_fence_phb(uint64_t phb_id);
-int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
 int64_t opal_get_epow_status(__be64 *status);
@@ -731,6 +827,10 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 
+int64_t opal_get_msg(uint64_t buffer, size_t size);
+int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
 
@@ -744,6 +844,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
 				   int depth, void *data);
 
 extern int opal_notifier_register(struct notifier_block *nb);
+extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+						struct notifier_block *nb);
 extern void opal_notifier_enable(void);
 extern void opal_notifier_disable(void);
 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
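
For the opal_sg_list layout added earlier in this file, one list node is sized to a page: the 16 bytes subtracted in SG_ENTRIES_PER_NODE cover num_entries plus the next pointer, and each opal_sg_entry is a pointer plus a long, i.e. 16 bytes on 64-bit. A standalone sketch of the arithmetic (not part of the patch; the page size is passed as a parameter here rather than using the kernel's PAGE_SIZE):

    /* Illustration only -- not part of the patch. */
    #include <stdio.h>

    struct opal_sg_entry {
    	void *data;
    	long  length;
    };

    #define SG_ENTRIES_PER_NODE(page_size) \
    	(((page_size) - 16) / sizeof(struct opal_sg_entry))

    int main(void)
    {
    	printf("sizeof(struct opal_sg_entry) = %zu\n", sizeof(struct opal_sg_entry));
    	printf("entries per 4K page  = %zu\n", SG_ENTRIES_PER_NODE(4096));   /* 255 */
    	printf("entries per 64K page = %zu\n", SG_ENTRIES_PER_NODE(65536));  /* 4095 */
    	return 0;
    }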
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b6ea9e068c13..9c5dbc3833fb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -16,7 +16,6 @@
 
 #ifdef CONFIG_PPC64
 
-#include <linux/init.h>
 #include <asm/types.h>
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
@@ -113,6 +112,10 @@ struct paca_struct {
 	/* Keep pgd in the same cacheline as the start of extlb */
 	pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
 	pgd_t *kernel_pgd;		/* Kernel PGD */
+
+	/* Shared by all threads of a core -- points to tcd of first thread */
+	struct tlb_core_data *tcd_ptr;
+
 	/* We can have up to 3 levels of reentrancy in the TLB miss handler */
 	u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
 	u64 exmc[8];		/* used for machine checks */
@@ -123,6 +126,8 @@ struct paca_struct {
 	void *mc_kstack;
 	void *crit_kstack;
 	void *dbg_kstack;
+
+	struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
 	mm_context_t context;
@@ -152,6 +157,15 @@ struct paca_struct {
 	 */
 	struct opal_machine_check_event *opal_mc_evt;
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Exclusive emergency stack pointer for machine check exception. */
+	void *mc_emergency_sp;
+	/*
+	 * Flag to check whether we are in machine check early handler
+	 * and already using emergency stack.
+	 */
+	u16 in_mce;
+#endif
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 7d6eacf249cf..b999ca318985 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -3,6 +3,7 @@
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
@@ -33,10 +34,73 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
+#ifdef CONFIG_NUMA_BALANCING
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
+}
+
+#define pte_numa pte_numa
+static inline int pte_numa(pte_t pte)
+{
+	return (pte_val(pte) &
+		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+
+#define pte_mknonnuma pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_NUMA;
+	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
+	return pte;
+}
+
+#define pte_mknuma pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+	/*
+	 * We should not set _PAGE_NUMA on non present ptes. Also clear the
+	 * present bit so that hash_page will return 1 and we collect this
+	 * as numa fault.
+	 */
+	if (pte_present(pte)) {
+		pte_val(pte) |= _PAGE_NUMA;
+		pte_val(pte) &= ~_PAGE_PRESENT;
+	} else
+		VM_BUG_ON(1);
+	return pte;
+}
+
+#define pmd_numa pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+	return pte_numa(pmd_pte(pmd));
+}
+
+#define pmd_mknonnuma pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
+}
+
+#define pmd_mknuma pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
+}
+
+# else
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /* Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  *
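
The CONFIG_NUMA_BALANCING helpers above mark a PTE for NUMA-fault tracking by setting _PAGE_NUMA and clearing _PAGE_PRESENT, so the next access traps into hash_page() and is counted as a NUMA fault; pte_mknonnuma() reverses that. A standalone sketch of the bit transitions (not part of the patch; the values are the hash64 ones from pte-hash64.h, with _PAGE_NUMA reusing the bit freed by dropping _PAGE_COHERENT):

    /* Illustration only -- not part of the patch. */
    #include <assert.h>
    #include <stdio.h>

    #define _PAGE_PRESENT	0x0001UL
    #define _PAGE_NUMA	0x0010UL
    #define _PAGE_ACCESSED	0x0100UL

    static unsigned long pte_mknuma(unsigned long pte)
    {
    	assert(pte & _PAGE_PRESENT);	/* stands in for VM_BUG_ON */
    	pte |= _PAGE_NUMA;
    	pte &= ~_PAGE_PRESENT;		/* force a fault into hash_page() */
    	return pte;
    }

    static unsigned long pte_mknonnuma(unsigned long pte)
    {
    	pte &= ~_PAGE_NUMA;
    	pte |= _PAGE_PRESENT | _PAGE_ACCESSED;
    	return pte;
    }

    int main(void)
    {
    	unsigned long pte = _PAGE_PRESENT | _PAGE_ACCESSED;

    	pte = pte_mknuma(pte);		/* no longer "present", but not a swap entry */
    	printf("after mknuma:    0x%04lx\n", pte);
    	pte = pte_mknonnuma(pte);
    	printf("after mknonnuma: 0x%04lx\n", pte);
    	return 0;
    }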
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f595b98079ee..6586a40a46ce 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -4,7 +4,6 @@
 #ifndef _ASM_POWERPC_PPC_ASM_H
 #define _ASM_POWERPC_PPC_ASM_H
 
-#include <linux/init.h>
 #include <linux/stringify.h>
 #include <asm/asm-compat.h>
 #include <asm/processor.h>
@@ -295,6 +294,11 @@ n:
  * you want to access various offsets within it). On ppc32 this is
  * identical to LOAD_REG_IMMEDIATE.
  *
+ * LOAD_REG_ADDR_PIC(rn, name)
+ *   Loads the address of label 'name' into register 'run'. Use this when
+ *   the kernel doesn't run at the linked or relocated address. Please
+ *   note that this macro will clobber the lr register.
+ *
  * LOAD_REG_ADDRBASE(rn, name)
  * ADDROFF(name)
  *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
@@ -305,6 +309,14 @@ n:
  *   LOAD_REG_ADDRBASE(rX, name)
  *   ld	rY,ADDROFF(name)(rX)
  */
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name)		\
+	bl	0f;				\
+0:	mflr	reg;				\
+	addis	reg,reg,(name - 0b)@ha;		\
+	addi	reg,reg,(name - 0b)@l;
+
 #ifdef __powerpc64__
 #define LOAD_REG_IMMEDIATE(reg,expr)		\
 	lis     reg,(expr)@highest;		\
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index fc14a38c7ccf..8ca20ac28dc2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -256,6 +256,8 @@ struct thread_struct {
 	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
 	u64		acc;		/* Accumulator */
 	unsigned long	spefscr;	/* SPE & eFP status */
+	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
+					   call or trap return */
 	int		used_spe;	/* set if process has used spe */
 #endif /* CONFIG_SPE */
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -317,7 +319,9 @@ struct thread_struct {
 	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
 
 #ifdef CONFIG_SPE
-#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#define SPEFSCR_INIT \
+	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
 #else
 #define SPEFSCR_INIT
 #endif
@@ -373,6 +377,8 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
+extern void fp_enable(void);
+extern void vec_enable(void);
 extern void load_fp_state(struct thread_fp_state *fp);
 extern void store_fp_state(struct thread_fp_state *fp);
 extern void load_vr_state(struct thread_vr_state *vr);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 678a7c1d9cb8..a1bc7e758422 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -21,7 +21,6 @@
 #if !defined(_ASM_POWERPC_PS3_H)
 #define _ASM_POWERPC_PS3_H
 
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <asm/cell-pmu.h>
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 0419eeb53274..2505d8eab15c 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -19,7 +19,7 @@
 #define _PAGE_FILE		0x0002 /* (!present only) software: pte holds file offset */
 #define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */
 #define _PAGE_GUARDED		0x0008
-#define _PAGE_COHERENT		0x0010 /* M: enforce memory coherence (SMP systems) */
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
 #define _PAGE_NO_CACHE		0x0020 /* I: cache inhibit */
 #define _PAGE_WRITETHRU		0x0040 /* W: cache write-through */
 #define _PAGE_DIRTY		0x0080 /* C: page changed */
@@ -27,6 +27,12 @@
 #define _PAGE_RW		0x0200 /* software: user write access allowed */
 #define _PAGE_BUSY		0x0800 /* software: PTE & hash are busy */
 
+/*
+ * Used for tracking numa faults
+ */
+#define _PAGE_NUMA	0x00000010 /* Gather numa placement stats */
+
+
 /* No separate kernel read-only */
 #define _PAGE_KERNEL_RW		(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
 #define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fa8388ed94c5..62b114e079cf 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1075,6 +1075,8 @@
 #define PVR_8560	0x80200000
 #define PVR_VER_E500V1	0x8020
 #define PVR_VER_E500V2	0x8021
+#define PVR_VER_E6500	0x8040
+
 /*
  * For the 8xx processors, all of them report the same PVR family for
  * the PowerPC core. The various versions of these processors must be
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2e31aacd8acc..163c3b05a76e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -101,6 +101,7 @@
 #define SPRN_IVOR39	0x1B1	/* Interrupt Vector Offset Register 39 */
 #define SPRN_IVOR40	0x1B2	/* Interrupt Vector Offset Register 40 */
 #define SPRN_IVOR41	0x1B3	/* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42	0x1B4	/* Interrupt Vector Offset Register 42 */
 #define SPRN_GIVOR2	0x1B8	/* Guest IVOR2 */
 #define SPRN_GIVOR3	0x1B9	/* Guest IVOR3 */
 #define SPRN_GIVOR4	0x1BA	/* Guest IVOR4 */
@@ -170,6 +171,7 @@
 #define SPRN_L2CSR1	0x3FA	/* L2 Data Cache Control and Status Register 1 */
 #define SPRN_DCCR	0x3FA	/* Data Cache Cacheability Register */
 #define SPRN_ICCR	0x3FB	/* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0	0x3FB	/* Power management control register 0 */
 #define SPRN_SVR	0x3FF	/* System Version Register */
 
 /*
@@ -216,6 +218,14 @@
 #define CCR1_DPC	0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
 #define CCR1_TCS	0x00000080 /* Timer Clock Select */
 
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT		(1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT	8
+#define PWRMGTCR0_PW20_ENT		0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN		(1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT	16
+#define PWRMGTCR0_AV_IDLE_CNT		0x3F0000
+
 /* Bit definitions for the MCSR. */
 #define MCSR_MCS	0x80000000 /* Machine Check Summary */
 #define MCSR_IB		0x40000000 /* Instruction PLB Error */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f6e78d63fb6a..35aa339410bd 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -30,8 +30,6 @@
 
 #define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
 
-#define arch_spin_is_locked(x)		((x)->slock != 0)
-
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
 #ifdef __BIG_ENDIAN__
@@ -56,6 +54,16 @@
 #define SYNC_IO
 #endif
 
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return !arch_spin_value_unlocked(*lock);
+}
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
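
arch_spin_value_unlocked() checks a copy of the lock word rather than dereferencing a pointer, which is what value-based users (e.g. the generic lockref code) want; arch_spin_is_locked() is then rebuilt on top of it. A standalone sketch (not part of the patch; arch_spinlock_t is re-declared locally):

    /* Illustration only -- not part of the patch. */
    #include <stdio.h>

    typedef struct { unsigned int slock; } arch_spinlock_t;

    static int arch_spin_value_unlocked(arch_spinlock_t lock)
    {
    	return lock.slock == 0;
    }

    static int arch_spin_is_locked(arch_spinlock_t *lock)
    {
    	return !arch_spin_value_unlocked(*lock);
    }

    int main(void)
    {
    	arch_spinlock_t lock = { 0 };
    	arch_spinlock_t snapshot = lock;	/* inspected by value, not via pointer */

    	printf("unlocked by value: %d, locked via pointer: %d\n",
    	       arch_spin_value_unlocked(snapshot), arch_spin_is_locked(&lock));

    	lock.slock = 0x80000001;		/* "0x800000yy when locked" per the comment above */
    	printf("locked via pointer now: %d\n", arch_spin_is_locked(&lock));
    	return 0;
    }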
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9854c564ac52..b034ecdb7c74 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -91,8 +91,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_32BIT		4	/* 32 bit binary */
-#define TIF_PERFMON_WORK	5	/* work for pfm_handle_work() */
-#define TIF_PERFMON_CTXSW	6	/* perfmon needs ctxsw calls */
+#define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
 #define TIF_NOHZ		9	/* in adaptive nohz mode */
@@ -115,8 +114,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1<<TIF_32BIT)
-#define _TIF_PERFMON_WORK	(1<<TIF_PERFMON_WORK)
-#define _TIF_PERFMON_CTXSW	(1<<TIF_PERFMON_CTXSW)
+#define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLESTEP	(1<<TIF_SINGLESTEP)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
@@ -132,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)
 				 _TIF_NOHZ)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_RESTORE_TM)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
 
 /* Bits in local_flags */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 9dfbc34bdbf5..0c9f8b74dd97 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -15,6 +15,7 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread);
 extern void tm_enable(void);
 extern void tm_reclaim(struct thread_struct *thread,
 		       unsigned long orig_msr, uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
 extern void tm_recheckpoint(struct thread_struct *thread,
 			    unsigned long orig_msr);
 extern void tm_abort(uint8_t cause);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 89e3ef2496ac..d0b5fca6b077 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
 
 static inline int cpu_to_node(int cpu)
 {
-	return numa_cpu_lookup_table[cpu];
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * During early boot, the numa-cpu lookup table might not have been
+	 * setup for all CPUs yet. In such cases, default to node 0.
+	 */
+	return (nid < 0) ? 0 : nid;
 }
 
 #define parent_node(node)	(node)
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 68d0cc998b1b..4f9b7ca0710f 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -15,7 +15,6 @@
 #define _ASM_POWERPC_VIO_H
 #ifdef __KERNEL__
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>