Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/agp.h | 4
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 196
-rw-r--r--  arch/powerpc/include/asm/cell-regs.h | 11
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 16
-rw-r--r--  arch/powerpc/include/asm/device.h | 10
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 323
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h | 205
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h (renamed from arch/powerpc/include/asm/exception.h) | 25
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 30
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 27
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 10
-rw-r--r--  arch/powerpc/include/asm/irq.h | 7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 4
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 6
-rw-r--r--  arch/powerpc/include/asm/mman.h | 2
-rw-r--r--  arch/powerpc/include/asm/mmu-40x.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h | 6
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 208
-rw-r--r--  arch/powerpc/include/asm/mmu-hash32.h | 16
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 22
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 46
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 15
-rw-r--r--  arch/powerpc/include/asm/nvram.h | 3
-rw-r--r--  arch/powerpc/include/asm/paca.h | 25
-rw-r--r--  arch/powerpc/include/asm/page.h | 4
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 40
-rw-r--r--  arch/powerpc/include/asm/pci.h | 12
-rw-r--r--  arch/powerpc/include/asm/perf_event.h (renamed from arch/powerpc/include/asm/perf_counter.h) | 22
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 46
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 9
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 67
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 6
-rw-r--r--  arch/powerpc/include/asm/pmc.h | 16
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 6
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h | 1
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 26
-rw-r--r--  arch/powerpc/include/asm/pte-40x.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-44x.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-8xx.h | 1
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h | 84
-rw-r--r--  arch/powerpc/include/asm/pte-common.h | 25
-rw-r--r--  arch/powerpc/include/asm/pte-fsl-booke.h | 9
-rw-r--r--  arch/powerpc/include/asm/pte-hash32.h | 1
-rw-r--r--  arch/powerpc/include/asm/qe.h | 1
-rw-r--r--  arch/powerpc/include/asm/reg.h | 141
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 50
-rw-r--r--  arch/powerpc/include/asm/setup.h | 2
-rw-r--r--  arch/powerpc/include/asm/smp.h | 10
-rw-r--r--  arch/powerpc/include/asm/socket.h | 3
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 20
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h | 8
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 6
-rw-r--r--  arch/powerpc/include/asm/tlb.h | 38
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 11
-rw-r--r--  arch/powerpc/include/asm/topology.h | 9
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/asm/vdso.h | 3
60 files changed, 1159 insertions, 761 deletions
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
index 86455c4c31ee..416e12c2d505 100644
--- a/arch/powerpc/include/asm/agp.h
+++ b/arch/powerpc/include/asm/agp.h
@@ -8,10 +8,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
 
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order)		\
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 897eade3afbe..56f2f2ea5631 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -56,174 +56,102 @@
 #define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
+/* Macro for generating the ***_bits() functions */
+#define DEFINE_BITOP(fn, op, prefix, postfix)	\
+static __inline__ void fn(unsigned long mask,	\
+		volatile unsigned long *_p)	\
+{	\
+	unsigned long old;	\
+	unsigned long *p = (unsigned long *)_p;	\
+	__asm__ __volatile__ (	\
+	prefix	\
+"1:"	PPC_LLARX "%0,0,%3\n"	\
+	stringify_in_c(op) "%0,%0,%2\n"	\
+	PPC405_ERR77(0,%3)	\
+	PPC_STLCX "%0,0,%3\n"	\
+	"bne- 1b\n"	\
+	postfix	\
+	: "=&r" (old), "+m" (*p)	\
+	: "r" (mask), "r" (p)	\
+	: "cc", "memory");	\
+}
+
+DEFINE_BITOP(set_bits, or, "", "")
+DEFINE_BITOP(clear_bits, andc, "", "")
+DEFINE_BITOP(clear_bits_unlock, andc, LWSYNC_ON_SMP, "")
+DEFINE_BITOP(change_bits, xor, "", "")
+
 static __inline__ void set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long old;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-"1:"	PPC_LLARX "%0,0,%3	# set_bit\n"
-	"or	%0,%0,%2\n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%0,0,%3\n"
-	"bne-	1b"
-	: "=&r" (old), "+m" (*p)
-	: "r" (mask), "r" (p)
-	: "cc" );
+	set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
 }
 
 static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long old;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-"1:"	PPC_LLARX "%0,0,%3	# clear_bit\n"
-	"andc	%0,%0,%2\n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%0,0,%3\n"
-	"bne-	1b"
-	: "=&r" (old), "+m" (*p)
-	: "r" (mask), "r" (p)
-	: "cc" );
+	clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
 }
 
 static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
-	unsigned long old;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-	LWSYNC_ON_SMP
-"1:"	PPC_LLARX "%0,0,%3	# clear_bit_unlock\n"
-	"andc	%0,%0,%2\n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%0,0,%3\n"
-	"bne-	1b"
-	: "=&r" (old), "+m" (*p)
-	: "r" (mask), "r" (p)
-	: "cc", "memory");
+	clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
 }
 
 static __inline__ void change_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long old;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-"1:"	PPC_LLARX "%0,0,%3	# change_bit\n"
-	"xor	%0,%0,%2\n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%0,0,%3\n"
-	"bne-	1b"
-	: "=&r" (old), "+m" (*p)
-	: "r" (mask), "r" (p)
-	: "cc" );
+	change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
+ * operands. */
+#define DEFINE_TESTOP(fn, op, prefix, postfix)	\
+static __inline__ unsigned long fn(	\
+		unsigned long mask,	\
+		volatile unsigned long *_p)	\
+{	\
+	unsigned long old, t;	\
+	unsigned long *p = (unsigned long *)_p;	\
+	__asm__ __volatile__ (	\
+	prefix	\
+"1:"	PPC_LLARX "%0,0,%3\n"	\
+	stringify_in_c(op) "%1,%0,%2\n"	\
+	PPC405_ERR77(0,%3)	\
+	PPC_STLCX "%1,0,%3\n"	\
+	"bne- 1b\n"	\
+	postfix	\
+	: "=&r" (old), "=&r" (t)	\
+	: "r" (mask), "r" (p)	\
+	: "cc", "memory");	\
+	return (old & mask);	\
 }
 
+DEFINE_TESTOP(test_and_set_bits, or, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_set_bits_lock, or, "", ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_clear_bits, andc, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_change_bits, xor, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+
 static __inline__ int test_and_set_bit(unsigned long nr,
 				       volatile unsigned long *addr)
 {
-	unsigned long old, t;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-	LWSYNC_ON_SMP
-"1:"	PPC_LLARX "%0,0,%3		# test_and_set_bit\n"
-	"or	%1,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%1,0,%3 \n"
-	"bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (old), "=&r" (t)
-	: "r" (mask), "r" (p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
+	return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_set_bit_lock(unsigned long nr,
 				       volatile unsigned long *addr)
 {
-	unsigned long old, t;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-"1:"	PPC_LLARX "%0,0,%3		# test_and_set_bit_lock\n"
-	"or	%1,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%1,0,%3 \n"
-	"bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (old), "=&r" (t)
-	: "r" (mask), "r" (p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
+	return test_and_set_bits_lock(BITOP_MASK(nr),
+				addr + BITOP_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_clear_bit(unsigned long nr,
 					 volatile unsigned long *addr)
 {
-	unsigned long old, t;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-	LWSYNC_ON_SMP
-"1:"	PPC_LLARX "%0,0,%3		# test_and_clear_bit\n"
-	"andc	%1,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%1,0,%3 \n"
-	"bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (old), "=&r" (t)
-	: "r" (mask), "r" (p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
+	return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_change_bit(unsigned long nr,
 					  volatile unsigned long *addr)
 {
-	unsigned long old, t;
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	__asm__ __volatile__(
-	LWSYNC_ON_SMP
-"1:"	PPC_LLARX "%0,0,%3		# test_and_change_bit\n"
-	"xor	%1,%0,%2 \n"
-	PPC405_ERR77(0,%3)
-	PPC_STLCX "%1,0,%3 \n"
-	"bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (old), "=&r" (t)
-	: "r" (mask), "r" (p)
-	: "cc", "memory");
-
-	return (old & mask) != 0;
-}
-
-static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
-{
-	unsigned long old;
-
-	__asm__ __volatile__(
-"1:"	PPC_LLARX "%0,0,%3	# set_bits\n"
-	"or	%0,%0,%2\n"
-	PPC_STLCX "%0,0,%3\n"
-	"bne-	1b"
-	: "=&r" (old), "+m" (*addr)
-	: "r" (mask), "r" (addr)
-	: "cc");
+	return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
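The refactoring above folds eight hand-written inline-asm loops into two generator macros, and the per-bit entry points now just compute a word pointer and a mask. A minimal user-space sketch of that indexing scheme (BITOP_MASK is not shown in this hunk, so the definition below is the conventional one; the plain `|=` stands in for the kernel's atomic larx/stcx. loop):

```c
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITOP_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)	((nr) / BITS_PER_LONG)

/* Non-atomic stand-in for the DEFINE_BITOP-generated set_bits(). */
static void set_bits(unsigned long mask, volatile unsigned long *p)
{
	*p |= mask;
}

static void set_bit(int nr, volatile unsigned long *addr)
{
	/* Same decomposition the patched set_bit() uses. */
	set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	set_bit(3, bitmap);			/* word 0, bit 3 */
	set_bit(BITS_PER_LONG + 1, bitmap);	/* word 1, bit 1 */
	printf("%lx %lx\n", bitmap[0], bitmap[1]);	/* prints: 8 2 */
	return 0;
}
```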
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
index fd6fd00434ef..fdf64fd25950 100644
--- a/arch/powerpc/include/asm/cell-regs.h
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -303,6 +303,17 @@ struct cbe_mic_tm_regs {
 extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
 extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
 
+
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W		0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R		0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M		0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask	0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H		0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask	0x00000000000007fful /* ioid */
+
 /* some utility functions to deal with SMT */
 extern u32 cbe_get_hw_thread_id(int cpu);
 extern u32 cbe_cpu_to_node(int cpu);
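The CBE_IOPTE_* masks moved here (out of iommu.h, see that hunk below) partition a 64-bit Cell I/O page table entry into protection, coherency, ordering, RPN, and ioid fields. A hedged sketch of how such an entry could be packed — `cbe_make_iopte` is a hypothetical helper for illustration, not a function from the header:

```c
#include <stdint.h>
#include <stdio.h>

/* Values copied from the block moved into cell-regs.h above. */
#define CBE_IOPTE_PP_W		0x8000000000000000ul
#define CBE_IOPTE_PP_R		0x4000000000000000ul
#define CBE_IOPTE_M		0x2000000000000000ul
#define CBE_IOPTE_RPN_Mask	0x07fffffffffff000ul
#define CBE_IOPTE_IOID_Mask	0x00000000000007fful

/* Pack a coherent read/write IOPTE for a physical address and ioid. */
static uint64_t cbe_make_iopte(uint64_t paddr, unsigned int ioid)
{
	return CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
	       (paddr & CBE_IOPTE_RPN_Mask) |
	       (ioid & CBE_IOPTE_IOID_Mask);
}

int main(void)
{
	/* prints: e000000020000011 */
	printf("%016llx\n",
	       (unsigned long long)cbe_make_iopte(0x20000000ull, 0x11));
	return 0;
}
```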
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index fb11b0c459b8..a8e18447c62b 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -5,6 +5,15 @@
 
 /*
  * Mapping of threads to cores
+ *
+ * Note: This implementation is limited to a power of 2 number of
+ * threads per core and the same number for each core in the system
+ * (though it would work if some processors had less threads as long
+ * as the CPU numbers are still allocated, just not brought offline).
+ *
+ * However, the API allows for a different implementation in the future
+ * if needed, as long as you only use the functions and not the variables
+ * directly.
  */
 
 #ifdef CONFIG_SMP
@@ -67,5 +76,12 @@ static inline int cpu_first_thread_in_core(int cpu)
 	return cpu & ~(threads_per_core - 1);
 }
 
+static inline int cpu_last_thread_in_core(int cpu)
+{
+	return cpu | (threads_per_core - 1);
+}
+
+
+
 #endif /* _ASM_POWERPC_CPUTHREADS_H */
 
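The new cpu_last_thread_in_core() mirrors cpu_first_thread_in_core(): with a power-of-2 threads_per_core, the first and last sibling of any CPU fall out of simple mask arithmetic. A small stand-alone illustration (threads_per_core is a kernel variable, fixed to 4 here for the example):

```c
#include <stdio.h>

static int threads_per_core = 4;	/* kernel variable; assumed value */

/* Same arithmetic as the inlines in the hunk above. */
static int cpu_first_thread_in_core(int cpu)
{
	return cpu & ~(threads_per_core - 1);
}

static int cpu_last_thread_in_core(int cpu)
{
	return cpu | (threads_per_core - 1);
}

int main(void)
{
	/* CPU 6 sits on the core holding CPUs 4..7. */
	printf("first=%d last=%d\n",
	       cpu_first_thread_in_core(6), cpu_last_thread_in_core(6));
	return 0;	/* prints: first=4 last=7 */
}
```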
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 7d2277cef09a..9dade15d1ab4 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -6,7 +6,7 @@
 #ifndef _ASM_POWERPC_DEVICE_H
 #define _ASM_POWERPC_DEVICE_H
 
-struct dma_mapping_ops;
+struct dma_map_ops;
 struct device_node;
 
 struct dev_archdata {
@@ -14,8 +14,11 @@ struct dev_archdata {
 	struct device_node *of_node;
 
 	/* DMA operations on that device */
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 	void *dma_data;
+#ifdef CONFIG_SWIOTLB
+	dma_addr_t max_direct_dma_addr;
+#endif
 };
 
 static inline void dev_archdata_set_node(struct dev_archdata *ad,
@@ -30,4 +33,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
 	return ad->of_node;
 }
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a6..cb2ca41dd526 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
@@ -64,58 +65,14 @@ static inline unsigned long device_to_mask(struct device *dev)
 }
 
 /*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
-	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
-				unsigned long offset, size_t size,
-				enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	void		(*unmap_page)(struct device *dev,
-				dma_addr_t dma_address, size_t size,
-				enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	int		(*addr_needs_map)(struct device *dev, dma_addr_t addr,
-				size_t size);
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size,
-				enum dma_data_direction direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size,
-				enum dma_data_direction direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				enum dma_data_direction direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				enum dma_data_direction direction);
-#endif
-};
-
-/*
  * Available generic sets of operations
  */
 #ifdef CONFIG_PPC64
-extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern struct dma_mapping_ops dma_direct_ops;
+extern struct dma_map_ops dma_direct_ops;
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
 	 * do it via an out of line call but it is not needed for now. The
@@ -128,14 +85,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	return dev->archdata.dma_ops;
 }
 
-static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
 	dev->archdata.dma_ops = ops;
 }
 
+/* this will be removed soon */
+#define flush_write_buffers()
+
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	if (unlikely(dma_ops == NULL))
 		return 0;
@@ -149,7 +111,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	if (unlikely(dma_ops == NULL))
 		return -EIO;
@@ -161,267 +123,70 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-/*
- * map_/unmap_single actually call through to map/unmap_page now that all the
- * dma_mapping_ops have been converted over. We just have to get the page and
- * offset to pass through to map_page
- */
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
-					      void *cpu_addr,
-					      size_t size,
-					      enum dma_data_direction direction,
-					      struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-				 (unsigned long)cpu_addr % PAGE_SIZE, size,
-				 direction, attrs);
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev,
-					  dma_addr_t dma_addr,
-					  size_t size,
-					  enum dma_data_direction direction,
-					  struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-					    struct page *page,
-					    unsigned long offset, size_t size,
-					    enum dma_data_direction direction,
-					    struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
-					dma_addr_t dma_address,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-}
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
-				      struct scatterlist *sg,
-				      int nhwentries,
-				      enum dma_data_direction direction,
-				      struct dma_attrs *attrs)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
-}
-
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
-}
-
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
-					   size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	void *cpu_addr;
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(dev, dma_handle,
-					      0, size, direction);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 
-	BUG_ON(!dma_ops);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+	return cpu_addr;
 }
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
-					   offset, size, direction);
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(!dma_ops);
+	if (dma_ops->mapping_error)
+		return dma_ops->mapping_error(dev, dma_addr);
 
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
-					      size, direction);
-}
-#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
-}
+#ifdef CONFIG_SWIOTLB
+	struct dev_archdata *sd = &dev->archdata;
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-}
+	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+		return 0;
+#endif
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-}
+	if (!dev->dma_mask)
+		return 0;
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
+	return addr + size <= *dev->dma_mask;
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
+	return paddr + get_dma_direct_offset(dev);
 }
-#endif
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-#ifdef CONFIG_PPC64
-	return (dma_addr == DMA_ERROR_CODE);
-#else
-	return 0;
-#endif
+	return daddr - get_dma_direct_offset(dev);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
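The new dma_capable() is what swiotlb consults to decide whether a buffer is directly addressable by a device or needs bouncing. A user-space model of just the mask check (struct device is reduced to the one field the check needs, and the swiotlb max_direct_dma_addr clamp is elided; this is a sketch, not the kernel type):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct device {
	uint64_t dma_mask;	/* highest DMA-reachable address */
};

static bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	/* Same comparison as the hunk above: whole buffer under the mask. */
	return addr + size <= dev->dma_mask;
}

int main(void)
{
	struct device dev = { .dma_mask = 0xffffffffull };	/* 32-bit device */

	printf("%d\n", dma_capable(&dev, 0xffffe000ull, 0x1000));	/* 1 */
	printf("%d\n", dma_capable(&dev, 0xfffff000ull, 0x1000));	/* 0 */
	return 0;
}
```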
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
new file mode 100644
index 000000000000..6d53f311d942
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -0,0 +1,205 @@
+/*
+ * Definitions for use by exception code on Book3-E
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_EXCEPTION_64E_H
+#define _ASM_POWERPC_EXCEPTION_64E_H
+
+/*
+ * SPRG usage and other considerations...
+ *
+ * Since TLB miss and other standard exceptions can be interrupted by
+ * critical exceptions which can themselves be interrupted by machine
+ * checks, and since the two latter can themselves cause a TLB miss when
+ * hitting the linear mapping for the kernel stacks, we need to be a bit
+ * creative on how we use SPRGs.
+ *
+ * The base idea is that we have one SPRG reserved for critical and one
+ * for machine check interrupts. Those are used to save a GPR that can
+ * then be used to get the PACA, and store as much context as we need
+ * to save in there. That includes saving the SPRGs used by the TLB miss
+ * handler for linear mapping misses and the associated SRR0/1 due to
+ * the above re-entrancy issue.
+ *
+ * So here's the current usage pattern. It's done regardless of which
+ * SPRGs are user-readable though, thus we might have to change some of
+ * this later. In order to do that more easily, we use special constants
+ * for naming them
+ *
+ * WARNING: Some of these SPRGs are user readable. We need to do something
+ * about it at some point by making sure they can't be used to leak kernel
+ * critical data
+ */
+
+
+/* We are out of SPRGs so we save some things in the PACA. The normal
+ * exception frame is smaller than the CRIT or MC one though
+ */
+#define EX_R1		(0 * 8)
+#define EX_CR		(1 * 8)
+#define EX_R10		(2 * 8)
+#define EX_R11		(3 * 8)
+#define EX_R14		(4 * 8)
+#define EX_R15		(5 * 8)
+
+/* The TLB miss exception uses different slots */
+
+#define EX_TLB_R10	( 0 * 8)
+#define EX_TLB_R11	( 1 * 8)
+#define EX_TLB_R12	( 2 * 8)
+#define EX_TLB_R13	( 3 * 8)
+#define EX_TLB_R14	( 4 * 8)
+#define EX_TLB_R15	( 5 * 8)
+#define EX_TLB_R16	( 6 * 8)
+#define EX_TLB_CR	( 7 * 8)
+#define EX_TLB_DEAR	( 8 * 8)	/* Level 0 and 2 only */
+#define EX_TLB_ESR	( 9 * 8)	/* Level 0 and 2 only */
+#define EX_TLB_SRR0	(10 * 8)
+#define EX_TLB_SRR1	(11 * 8)
+#define EX_TLB_MMUCR0	(12 * 8)	/* Level 0 */
+#define EX_TLB_MAS1	(12 * 8)	/* Level 0 */
+#define EX_TLB_MAS2	(13 * 8)	/* Level 0 */
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define EX_TLB_R8	(14 * 8)
+#define EX_TLB_R9	(15 * 8)
+#define EX_TLB_LR	(16 * 8)
+#define EX_TLB_SIZE	(17 * 8)
+#else
+#define EX_TLB_SIZE	(14 * 8)
+#endif
+
+#define START_EXCEPTION(label)						\
+	.globl exc_##label##_book3e;					\
+exc_##label##_book3e:
+
+/* TLB miss exception prolog
+ *
+ * This prolog handles re-entrancy (up to 3 levels supported in the PACA
+ * though we currently don't test for overflow). It provides you with a
+ * re-entrancy safe working space of r10...r16 and CR with r12 being used
+ * as the exception area pointer in the PACA for that level of re-entrancy
+ * and r13 containing the PACA pointer.
+ *
+ * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply
+ * as-is for instruction exceptions. It's up to the actual exception code
+ * to save them as well if required.
+ */
+#define TLB_MISS_PROLOG							\
+	mtspr	SPRN_SPRG_TLB_SCRATCH,r12;				\
+	mfspr	r12,SPRN_SPRG_TLB_EXFRAME;				\
+	std	r10,EX_TLB_R10(r12);					\
+	mfcr	r10;							\
+	std	r11,EX_TLB_R11(r12);					\
+	mfspr	r11,SPRN_SPRG_TLB_SCRATCH;				\
+	std	r13,EX_TLB_R13(r12);					\
+	mfspr	r13,SPRN_SPRG_PACA;					\
+	std	r14,EX_TLB_R14(r12);					\
+	addi	r14,r12,EX_TLB_SIZE;					\
+	std	r15,EX_TLB_R15(r12);					\
+	mfspr	r15,SPRN_SRR1;						\
+	std	r16,EX_TLB_R16(r12);					\
+	mfspr	r16,SPRN_SRR0;						\
+	std	r10,EX_TLB_CR(r12);					\
+	std	r11,EX_TLB_R12(r12);					\
+	mtspr	SPRN_SPRG_TLB_EXFRAME,r14;				\
+	std	r15,EX_TLB_SRR1(r12);					\
+	std	r16,EX_TLB_SRR0(r12);					\
+	TLB_MISS_PROLOG_STATS
+
+/* And these are the matching epilogs that restore things
+ *
+ * There are 3 epilogs:
+ *
+ * - SUCCESS       : Unwinds one level
+ * - ERROR         : restore from level 0 and reset
+ * - ERROR_SPECIAL : restore from current level and reset
+ *
+ * Normal errors use ERROR, that is, they restore the initial fault context
+ * and trigger a fault. However, there is a special case for linear mapping
+ * errors. Those should basically never happen, but if they do happen, we
+ * want the error to point out the context that did that linear mapping
+ * fault, not the initial level 0 (basically, we got a bogus PGF or something
+ * like that). For userland errors on the linear mapping, there is no
+ * difference since those are always level 0 anyway
+ */
+
+#define TLB_MISS_RESTORE(freg)						\
+	ld	r14,EX_TLB_CR(r12);					\
+	ld	r10,EX_TLB_R10(r12);					\
+	ld	r15,EX_TLB_SRR0(r12);					\
+	ld	r16,EX_TLB_SRR1(r12);					\
+	mtspr	SPRN_SPRG_TLB_EXFRAME,freg;				\
+	ld	r11,EX_TLB_R11(r12);					\
+	mtcr	r14;							\
+	ld	r13,EX_TLB_R13(r12);					\
+	ld	r14,EX_TLB_R14(r12);					\
+	mtspr	SPRN_SRR0,r15;						\
+	ld	r15,EX_TLB_R15(r12);					\
+	mtspr	SPRN_SRR1,r16;						\
+	TLB_MISS_RESTORE_STATS						\
+	ld	r16,EX_TLB_R16(r12);					\
+	ld	r12,EX_TLB_R12(r12);					\
+
+#define TLB_MISS_EPILOG_SUCCESS						\
+	TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR						\
+	addi	r12,r13,PACA_EXTLB;					\
+	TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR_SPECIAL					\
+	addi	r11,r13,PACA_EXTLB;					\
+	TLB_MISS_RESTORE(r11)
+
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define TLB_MISS_PROLOG_STATS						\
+	mflr	r10;							\
+	std	r8,EX_TLB_R8(r12);					\
+	std	r9,EX_TLB_R9(r12);					\
+	std	r10,EX_TLB_LR(r12);
+#define TLB_MISS_RESTORE_STATS						\
+	ld	r16,EX_TLB_LR(r12);					\
+	ld	r9,EX_TLB_R9(r12);					\
+	ld	r8,EX_TLB_R8(r12);					\
+	mtlr	r16;
+#define TLB_MISS_STATS_D(name)						\
+	addi	r9,r13,MMSTAT_DSTATS+name;				\
+	bl	.tlb_stat_inc;
+#define TLB_MISS_STATS_I(name)						\
+	addi	r9,r13,MMSTAT_ISTATS+name;				\
+	bl	.tlb_stat_inc;
+#define TLB_MISS_STATS_X(name)						\
+	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);				\
+	cmpdi	cr2,r8,-1;						\
+	beq	cr2,61f;						\
+	addi	r9,r13,MMSTAT_DSTATS+name;				\
+	b	62f;							\
+61:	addi	r9,r13,MMSTAT_ISTATS+name;				\
+62:	bl	.tlb_stat_inc;
+#define TLB_MISS_STATS_SAVE_INFO					\
+	std	r14,EX_TLB_ESR(r12);	/* save ESR */			\
+
+
+#else
+#define TLB_MISS_PROLOG_STATS
+#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_STATS_D(name)
+#define TLB_MISS_STATS_I(name)
+#define TLB_MISS_STATS_X(name)
+#define TLB_MISS_STATS_Y(name)
+#define TLB_MISS_STATS_SAVE_INFO
+#endif
+
+#define SET_IVOR(vector_number, vector_offset)	\
+	li	r3,vector_offset@l;		\
+	ori	r3,r3,interrupt_base_book3e@l;	\
+	mtspr	SPRN_IVOR##vector_number,r3;
+
+#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+
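The TLB_MISS_PROLOG saves state at SPRN_SPRG_TLB_EXFRAME and then bumps that pointer by EX_TLB_SIZE (`addi r14,r12,EX_TLB_SIZE`), so each re-entrancy level gets its own block of slots. A small C sketch of the resulting frame arithmetic — offsets copied from the header above, with the PACA_EXTLB base treated as 0 for illustration:

```c
#include <stdio.h>

/* Slot offsets copied from exception-64e.h (non-stats layout). */
#define EX_TLB_SRR0	(10 * 8)
#define EX_TLB_SIZE	(14 * 8)

int main(void)
{
	unsigned long base = 0;	/* stands in for PACA_EXTLB */

	/* Up to 3 nested levels, as the prolog comment describes. */
	for (int level = 0; level < 3; level++)
		printf("level %d: frame at +0x%lx, SRR0 slot at +0x%lx\n",
		       level,
		       base + level * EX_TLB_SIZE,
		       base + level * EX_TLB_SIZE + EX_TLB_SRR0);
	return 0;
}
```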
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception-64s.h
index d3d4534e3c74..a98653b26231 100644
--- a/arch/powerpc/include/asm/exception.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -57,17 +57,16 @@
 	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */
 
 #define EXCEPTION_PROLOG_1(area)					\
-	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
+	mfspr	r13,SPRN_SPRG_PACA;	/* get paca address into r13 */	\
 	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
 	std	r10,area+EX_R10(r13);					\
 	std	r11,area+EX_R11(r13);					\
 	std	r12,area+EX_R12(r13);					\
-	mfspr	r9,SPRN_SPRG1;						\
+	mfspr	r9,SPRN_SPRG_SCRATCH0;					\
 	std	r9,area+EX_R13(r13);					\
 	mfcr	r9
 
-#define EXCEPTION_PROLOG_PSERIES(area, label)				\
-	EXCEPTION_PROLOG_1(area);					\
+#define EXCEPTION_PROLOG_PSERIES_1(label)				\
 	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
 	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
@@ -78,6 +77,10 @@
 	rfid;								\
 	b	.	/* prevent speculative execution */
 
+#define EXCEPTION_PROLOG_PSERIES(area, label)				\
+	EXCEPTION_PROLOG_1(area);					\
+	EXCEPTION_PROLOG_PSERIES_1(label);
+
 /*
  * The common exception prolog is used for all except a few exceptions
  * such as a segment miss on a kernel address. We have to be prepared
@@ -144,7 +147,7 @@
 	.globl label##_pSeries;				\
 label##_pSeries:					\
 	HMT_MEDIUM;					\
-	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
+	mtspr	SPRN_SPRG_SCRATCH0,r13;	/* save r13 */	\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
 #define HSTD_EXCEPTION_PSERIES(n, label)		\
@@ -152,13 +155,13 @@ label##_pSeries: \
 	.globl label##_pSeries;				\
 label##_pSeries:					\
 	HMT_MEDIUM;					\
-	mtspr	SPRN_SPRG1,r20;		/* save r20 */	\
+	mtspr	SPRN_SPRG_SCRATCH0,r20;	/* save r20 */	\
 	mfspr	r20,SPRN_HSRR0;		/* copy HSRR0 to SRR0 */ \
 	mtspr	SPRN_SRR0,r20;				\
 	mfspr	r20,SPRN_HSRR1;		/* copy HSRR1 to SRR1 */ \
 	mtspr	SPRN_SRR1,r20;				\
-	mfspr	r20,SPRN_SPRG1;		/* restore r20 */ \
-	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
+	mfspr	r20,SPRN_SPRG_SCRATCH0;	/* restore r20 */ \
+	mtspr	SPRN_SPRG_SCRATCH0,r13;	/* save r13 */	\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
 
@@ -167,15 +170,15 @@ label##_pSeries: \
 	.globl label##_pSeries;						\
 label##_pSeries:							\
 	HMT_MEDIUM;							\
-	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
-	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
+	mtspr	SPRN_SPRG_SCRATCH0,r13;	/* save r13 */			\
+	mfspr	r13,SPRN_SPRG_PACA;	/* get paca address into r13 */	\
 	std	r9,PACA_EXGEN+EX_R9(r13);	/* save r9, r10 */	\
 	std	r10,PACA_EXGEN+EX_R10(r13);				\
 	lbz	r10,PACASOFTIRQEN(r13);					\
 	mfcr	r9;							\
 	cmpwi	r10,0;							\
 	beq	masked_interrupt;					\
-	mfspr	r10,SPRN_SPRG1;						\
+	mfspr	r10,SPRN_SPRG_SCRATCH0;					\
 	std	r10,PACA_EXGEN+EX_R13(r13);				\
 	std	r11,PACA_EXGEN+EX_R11(r13);				\
 	std	r12,PACA_EXGEN+EX_R12(r13);				\
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 288e14d53b7f..fb3c05a0cbbf 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -1,29 +1 @@
-#ifndef _ASM_POWERPC_HARDIRQ_H
-#define _ASM_POWERPC_HARDIRQ_H
-#ifdef __KERNEL__
-
-#include <asm/irq.h>
-#include <asm/bug.h>
-
-/* The __last_jiffy_stamp field is needed to ensure that no decrementer
- * interrupt is lost on SMP machines. Since on most CPUs it is in the same
- * cache line as local_irq_count, it is cheap to access and is also used on UP
- * for uniformity.
- */
-typedef struct {
-	unsigned int __softirq_pending;	/* set_bit is used on this */
-	unsigned int __last_jiffy_stamp;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
-
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "illegal vector %d received!\n", irq);
-	BUG();
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_HARDIRQ_H */
+#include <asm-generic/hardirq.h>
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 8b505eaaa38a..abbc2aaaced5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -49,8 +49,13 @@ extern void iseries_handle_interrupts(void);
 #define raw_irqs_disabled()		(local_get_flags() == 0)
 #define raw_irqs_disabled_flags(flags)	((flags) == 0)
 
+#ifdef CONFIG_PPC_BOOK3E
+#define __hard_irq_enable()	__asm__ __volatile__("wrteei 1": : :"memory");
+#define __hard_irq_disable()	__asm__ __volatile__("wrteei 0": : :"memory");
+#else
 #define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
 #define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
+#endif
 
 #define hard_irq_disable()			\
 	do {					\
@@ -130,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
 	return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else  /* CONFIG_PERF_COUNTERS */
+#else  /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
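The perf_event_pending accessors implement a set-then-drain flag stored in the PACA (read and written via r13 with lbz/stb). A plain-C model of the intended usage pattern — the timer-interrupt call site shown here is an assumption about the consumer, not part of this diff:

```c
#include <stdio.h>

/* Stand-in for the per-CPU paca->perf_event_pending byte. */
static unsigned long perf_event_pending;

static unsigned long test_perf_event_pending(void) { return perf_event_pending; }
static void set_perf_event_pending(void)   { perf_event_pending = 1; }
static void clear_perf_event_pending(void) { perf_event_pending = 0; }

/* Hypothetical consumer: drain the flag at a safe point. */
static void timer_interrupt_model(void)
{
	if (test_perf_event_pending()) {
		clear_perf_event_pending();
		printf("running deferred perf_event work\n");
	}
}

int main(void)
{
	set_perf_event_pending();	/* raised in an NMI-like context */
	timer_interrupt_model();	/* drained at the next safe point */
	return 0;
}
```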
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7ead7c16fb7c..7464c0daddd1 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,16 +35,6 @@
 #define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
 #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
 
-/* Cell page table entries */
-#define CBE_IOPTE_PP_W		0x8000000000000000ul /* protection: write */
-#define CBE_IOPTE_PP_R		0x4000000000000000ul /* protection: read */
-#define CBE_IOPTE_M		0x2000000000000000ul /* coherency required */
-#define CBE_IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
-#define CBE_IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
-#define CBE_IOPTE_RPN_Mask	0x07fffffffffff000ul /* RPN */
-#define CBE_IOPTE_H		0x0000000000000800ul /* cache hint */
-#define CBE_IOPTE_IOID_Mask	0x00000000000007fful /* ioid */
-
 /* Boot time flags */
 extern int iommu_is_off;
 extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0a5137676e1b..bbcd1aaf3dfd 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -302,7 +302,8 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
 
 /* -- OF helpers -- */
 
-/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
+/**
+ * irq_create_of_mapping - Map a hardware interrupt into linux virq space
  * @controller: Device node of the interrupt controller
  * @intspec: Interrupt specifier from the device-tree
  * @intsize: Size of the interrupt specifier from the device-tree
@@ -314,8 +315,8 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
 extern unsigned int irq_create_of_mapping(struct device_node *controller,
 					  u32 *intspec, unsigned int intsize);
 
-
-/* irq_of_parse_and_map - Parse nad Map an interrupt into linux virq space
+/**
+ * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
  * @index: Index of the interrupt to map
  *
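The kernel-doc blocks promoted above document the two OF interrupt-mapping helpers. A hypothetical driver fragment using irq_of_parse_and_map() as described (the demo_* names are illustrative only; this is a sketch, not code from the tree):

```c
#include <linux/interrupt.h>
#include <asm/irq.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Map interrupt 0 of a device node, then register a handler for it. */
static int demo_attach_irq(struct device_node *np, void *data)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)		/* NO_IRQ: mapping failed */
		return -EINVAL;

	return request_irq(virq, demo_isr, 0, "demo", data);
}
```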
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fddc3ed715fa..c9c930ed11d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,8 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1UL << 31)
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
 struct kvm;
 struct kvm_run;
@@ -153,7 +154,6 @@ struct kvm_vcpu_arch {
 	u32 pid;
 	u32 swap_pid;
 
-	u32 pvr;
 	u32 ccr0;
 	u32 ccr1;
 	u32 dbcr0;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 11d1fc3a8962..9efa2be78331 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -209,14 +209,14 @@ struct machdep_calls {
 	/*
 	 * optional PCI "hooks"
 	 */
-	/* Called in indirect_* to avoid touching devices */
-	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
-
 	/* Called at the very end of pcibios_init() */
 	void (*pcibios_after_init)(void);
 
 #endif /* CONFIG_PPC32 */
 
+	/* Called in indirect_* to avoid touching devices */
+	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+
 	/* Called after PPC generic resource fixup to perform
 	   machine specific fixups */
 	void (*pcibios_fixup_resources)(struct pci_dev *);
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7b1c49811a24..d4a7f645c5db 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -25,6 +25,8 @@
 
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB	0x40000		/* create a huge page mapping */
 
 #ifdef __KERNEL__
 #ifdef CONFIG_PPC64
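MAP_HUGETLB, added above, lets user space request a huge-page-backed mapping straight from mmap(). A minimal user-space sketch (0x40000 is the value this patch defines for powerpc; the length must be a multiple of the huge page size, and the call fails unless huge pages are reserved, e.g. via vm.nr_hugepages):

```c
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* value added by this patch */
#endif

int main(void)
{
	size_t len = 16 * 1024 * 1024;

	/* Anonymous mapping backed by huge pages. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	munmap(p, len);
	return 0;
}
```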
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 776f415a36aa..34916865eaef 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -61,4 +61,7 @@ typedef struct {
 
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 3c86576bfefa..0372669383a8 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -79,16 +79,22 @@ typedef struct {
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define mmu_virtual_psize	MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define mmu_virtual_psize	MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define mmu_virtual_psize	MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
+#define mmu_virtual_psize	MMU_PAGE_256K
 #else
 #error "Unsupported PAGE_SIZE"
 #endif
 
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
 #define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
 #define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 07865a357848..3d11d3ce79ec 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -143,4 +143,7 @@ typedef struct {
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_8M
+
 #endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 7e74cff81d86..74695816205c 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -38,58 +38,140 @@
38#define BOOK3E_PAGESZ_1TB 30 38#define BOOK3E_PAGESZ_1TB 30
39#define BOOK3E_PAGESZ_2TB 31 39#define BOOK3E_PAGESZ_2TB 31
40 40
-#define MAS0_TLBSEL(x)	((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)	((x << 16) & 0x0FFF0000)
-#define MAS0_NV(x)	((x) & 0x00000FFF)
-
-#define MAS1_VALID	0x80000000
-#define MAS1_IPROT	0x40000000
-#define MAS1_TID(x)	((x << 16) & 0x3FFF0000)
-#define MAS1_IND	0x00002000
-#define MAS1_TS		0x00001000
-#define MAS1_TSIZE(x)	((x << 7) & 0x00000F80)
-
-#define MAS2_EPN	0xFFFFF000
-#define MAS2_X0		0x00000040
-#define MAS2_X1		0x00000020
-#define MAS2_W		0x00000010
-#define MAS2_I		0x00000008
-#define MAS2_M		0x00000004
-#define MAS2_G		0x00000002
-#define MAS2_E		0x00000001
+/* MAS registers bit definitions */
+
+#define MAS0_TLBSEL(x)		((x << 28) & 0x30000000)
+#define MAS0_ESEL(x)		((x << 16) & 0x0FFF0000)
+#define MAS0_NV(x)		((x) & 0x00000FFF)
+#define MAS0_HES		0x00004000
+#define MAS0_WQ_ALLWAYS		0x00000000
+#define MAS0_WQ_COND		0x00001000
+#define MAS0_WQ_CLR_RSRV	0x00002000
+
+#define MAS1_VALID		0x80000000
+#define MAS1_IPROT		0x40000000
+#define MAS1_TID(x)		((x << 16) & 0x3FFF0000)
+#define MAS1_IND		0x00002000
+#define MAS1_TS			0x00001000
+#define MAS1_TSIZE_MASK		0x00000f80
+#define MAS1_TSIZE_SHIFT	7
+#define MAS1_TSIZE(x)		((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+
+#define MAS2_EPN		0xFFFFF000
+#define MAS2_X0			0x00000040
+#define MAS2_X1			0x00000020
+#define MAS2_W			0x00000010
+#define MAS2_I			0x00000008
+#define MAS2_M			0x00000004
+#define MAS2_G			0x00000002
+#define MAS2_E			0x00000001
 #define MAS2_EPN_MASK(size)	(~0 << (size + 10))
 #define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN		0xFFFFF000
 #define MAS3_U0			0x00000200
 #define MAS3_U1			0x00000100
 #define MAS3_U2			0x00000080
 #define MAS3_U3			0x00000040
 #define MAS3_UX			0x00000020
 #define MAS3_SX			0x00000010
 #define MAS3_UW			0x00000008
 #define MAS3_SW			0x00000004
 #define MAS3_UR			0x00000002
 #define MAS3_SR			0x00000001
-
-#define MAS4_TLBSELD(x)	MAS0_TLBSEL(x)
-#define MAS4_INDD	0x00008000
-#define MAS4_TSIZED(x)	MAS1_TSIZE(x)
-#define MAS4_X0D	0x00000040
-#define MAS4_X1D	0x00000020
-#define MAS4_WD		0x00000010
-#define MAS4_ID		0x00000008
-#define MAS4_MD		0x00000004
-#define MAS4_GD		0x00000002
-#define MAS4_ED		0x00000001
-
-#define MAS6_SPID0	0x3FFF0000
-#define MAS6_SPID1	0x00007FFE
-#define MAS6_ISIZE(x)	MAS1_TSIZE(x)
-#define MAS6_SAS	0x00000001
-#define MAS6_SPID	MAS6_SPID0
-
-#define MAS7_RPN	0xFFFFFFFF
+#define MAS3_SPSIZE		0x0000003e
+#define MAS3_SPSIZE_SHIFT	1
+
+#define MAS4_TLBSELD(x)		MAS0_TLBSEL(x)
+#define MAS4_INDD		0x00008000	/* Default IND */
+#define MAS4_TSIZED(x)		MAS1_TSIZE(x)
+#define MAS4_X0D		0x00000040
+#define MAS4_X1D		0x00000020
+#define MAS4_WD			0x00000010
+#define MAS4_ID			0x00000008
+#define MAS4_MD			0x00000004
+#define MAS4_GD			0x00000002
+#define MAS4_ED			0x00000001
+#define MAS4_WIMGED_MASK	0x0000001f	/* Default WIMGE */
+#define MAS4_WIMGED_SHIFT	0
+#define MAS4_VLED		MAS4_X1D	/* Default VLE */
+#define MAS4_ACMD		0x000000c0	/* Default ACM */
+#define MAS4_ACMD_SHIFT		6
+#define MAS4_TSIZED_MASK	0x00000f80	/* Default TSIZE */
+#define MAS4_TSIZED_SHIFT	7
+
+#define MAS6_SPID0		0x3FFF0000
+#define MAS6_SPID1		0x00007FFE
+#define MAS6_ISIZE(x)		MAS1_TSIZE(x)
+#define MAS6_SAS		0x00000001
+#define MAS6_SPID		MAS6_SPID0
+#define MAS6_SIND		0x00000002	/* Indirect page */
+#define MAS6_SIND_SHIFT		1
+#define MAS6_SPID_MASK		0x3fff0000
+#define MAS6_SPID_SHIFT		16
+#define MAS6_ISIZE_MASK		0x00000f80
+#define MAS6_ISIZE_SHIFT	7
+
+#define MAS7_RPN		0xFFFFFFFF
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI		0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI		0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI		0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI		0x00000020	/* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI		(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+				 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS		0x00000780	/* TLB0 Page Size */
+#define MMUCSR0_TLB1PS		0x00007800	/* TLB1 Page Size */
+#define MMUCSR0_TLB2PS		0x00078000	/* TLB2 Page Size */
+#define MMUCSR0_TLB3PS		0x00780000	/* TLB3 Page Size */
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY		0x00000fff	/* number of entries */
+#define TLBnCFG_HES		0x00002000	/* HW select supported */
+#define TLBnCFG_IPROT		0x00008000	/* IPROT supported */
+#define TLBnCFG_GTWE		0x00010000	/* Guest can write */
+#define TLBnCFG_IND		0x00020000	/* IND entries supported */
+#define TLBnCFG_PT		0x00040000	/* Can load from page table */
+#define TLBnCFG_ASSOC		0xff000000	/* Associativity */
+
+/* TLBnPS encoding */
+#define TLBnPS_4K		0x00000004
+#define TLBnPS_8K		0x00000008
+#define TLBnPS_16K		0x00000010
+#define TLBnPS_32K		0x00000020
+#define TLBnPS_64K		0x00000040
+#define TLBnPS_128K		0x00000080
+#define TLBnPS_256K		0x00000100
+#define TLBnPS_512K		0x00000200
+#define TLBnPS_1M		0x00000400
+#define TLBnPS_2M		0x00000800
+#define TLBnPS_4M		0x00001000
+#define TLBnPS_8M		0x00002000
+#define TLBnPS_16M		0x00004000
+#define TLBnPS_32M		0x00008000
+#define TLBnPS_64M		0x00010000
+#define TLBnPS_128M		0x00020000
+#define TLBnPS_256M		0x00040000
+#define TLBnPS_512M		0x00080000
+#define TLBnPS_1G		0x00100000
+#define TLBnPS_2G		0x00200000
+#define TLBnPS_4G		0x00400000
+#define TLBnPS_8G		0x00800000
+#define TLBnPS_16G		0x01000000
+#define TLBnPS_32G		0x02000000
+#define TLBnPS_64G		0x04000000
+#define TLBnPS_128G		0x08000000
+#define TLBnPS_256G		0x10000000
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL			0
+#define TLBILX_T_TID			1
+#define TLBILX_T_FULLMATCH		3
+#define TLBILX_T_CLASS0			4
+#define TLBILX_T_CLASS1			5
+#define TLBILX_T_CLASS2			6
+#define TLBILX_T_CLASS3			7
 
94#ifndef __ASSEMBLY__ 176#ifndef __ASSEMBLY__
95 177
@@ -100,6 +182,34 @@ typedef struct {
 	unsigned int	active;
 	unsigned long	vdso_base;
 } mm_context_t;
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * penc  : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def
+{
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+/* The page sizes use the same names as 64-bit hash but are
+ * constants
+ */
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_4K
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_64K
+#else
+#error Unsupported page size
+#endif
+
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
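The MAS accessors above are meant to be composed into whole TLB entries. A minimal sketch of a protected TLB1 write on a 2.06 Book3E core follows; this is an illustration rather than code from this patch, and it assumes mtspr and the SPRN_MAS* numbers from <asm/reg_booke.h> plus a BOOK3E_PAGESZ_16M size code defined earlier in this header:

static void book3e_write_tlb1_sketch(unsigned long va, unsigned long pa)
{
	/* entry 2 of TLB1, IPROT so flash invalidates leave it alone */
	mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(2));
	mtspr(SPRN_MAS1, MAS1_VALID | MAS1_IPROT | MAS1_TID(0) |
			 MAS1_TSIZE(BOOK3E_PAGESZ_16M));
	/* MAS2_VAL masks va down to the page boundary, then ORs in WIMGE */
	mtspr(SPRN_MAS2, MAS2_VAL(va, BOOK3E_PAGESZ_16M, MAS2_M));
	/* supervisor read/write/execute permissions on the physical page */
	mtspr(SPRN_MAS3, (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX);
	asm volatile("tlbwe; isync" : : : "memory");
}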
diff --git a/arch/powerpc/include/asm/mmu-hash32.h b/arch/powerpc/include/asm/mmu-hash32.h
index 16b1a1e77e64..16f513e5cbd7 100644
--- a/arch/powerpc/include/asm/mmu-hash32.h
+++ b/arch/powerpc/include/asm/mmu-hash32.h
@@ -55,21 +55,25 @@ struct ppc_bat {
 
 #ifndef __ASSEMBLY__
 
-/* Hardware Page Table Entry */
+/*
+ * Hardware Page Table Entry
+ * Note that the xpn and x bitfields are used only by processors that
+ * support extended addressing; otherwise, those bits are reserved.
+ */
 struct hash_pte {
 	unsigned long v:1;	/* Entry is valid */
 	unsigned long vsid:24;	/* Virtual segment identifier */
 	unsigned long h:1;	/* Hash algorithm indicator */
 	unsigned long api:6;	/* Abbreviated page index */
 	unsigned long rpn:20;	/* Real (physical) page number */
-	unsigned long    :3;	/* Unused */
+	unsigned long xpn:3;	/* Real page number bits 0-2, optional */
 	unsigned long r:1;	/* Referenced */
 	unsigned long c:1;	/* Changed */
 	unsigned long w:1;	/* Write-thru cache mode */
 	unsigned long i:1;	/* Cache inhibited */
 	unsigned long m:1;	/* Memory coherence */
 	unsigned long g:1;	/* Guarded */
-	unsigned long    :1;	/* Unused */
+	unsigned long x:1;	/* Real page number bit 3, optional */
 	unsigned long pp:2;	/* Page protection */
 };
 
@@ -80,4 +84,10 @@ typedef struct {
 
 #endif /* !__ASSEMBLY__ */
 
+/* We happily ignore the smaller BATs on 601, we don't actually use
+ * those definitions on hash32 at the moment anyway
+ */
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_HASH32_H_ */
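On parts that implement the optional extended addressing, the new xpn and x fields extend the real page number: xpn carries real-address bits 0-2 and x carries bit 3 (IBM bit numbering of a 36-bit real address). A hedged illustration of how the full physical address would be assembled, not code from this patch:

static inline unsigned long long hpte_phys_addr_sketch(const struct hash_pte *h,
						       unsigned int offset)
{
	/* 36-bit real address: xpn -> bits 35:33, x -> bit 32,
	 * rpn -> bits 31:12, page offset -> bits 11:0 */
	return ((unsigned long long)h->xpn << 33) |
	       ((unsigned long long)h->x   << 32) |
	       ((unsigned long long)h->rpn << 12) |
	       (offset & 0xfff);
}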
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 98c104a09961..bebe31c2e907 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -41,6 +41,7 @@ extern char initial_stab[];
 
 #define SLB_NUM_BOLTED		3
 #define SLB_CACHE_ENTRIES	8
+#define SLB_MIN_SIZE		32
 
 /* Bits in the SLB ESID word */
 #define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */
@@ -139,26 +140,6 @@ struct mmu_psize_def
 #endif /* __ASSEMBLY__ */
 
 /*
- * The kernel use the constants below to index in the page sizes array.
- * The use of fixed constants for this purpose is better for performances
- * of the low level hash refill handlers.
- *
- * A non supported page size has a "shift" field set to 0
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable
- */
-
-#define MMU_PAGE_4K		0	/* 4K */
-#define MMU_PAGE_64K		1	/* 64K */
-#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M		3	/* 1M */
-#define MMU_PAGE_16M		4	/* 16M */
-#define MMU_PAGE_16G		5	/* 16G */
-#define MMU_PAGE_COUNT		6
-
-/*
  * Segment sizes.
  * These are the values used by hardware in the B field of
  * SLB entries and the first dword of MMU hashtable entries.
@@ -296,6 +277,7 @@ extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
 extern void slb_vmalloc_update(void);
+extern void slb_set_size(u16 size);
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index fb57ded592f9..7ffbb65ff7a9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -17,6 +17,7 @@
 #define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
+#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -57,6 +58,15 @@
  */
 #define MMU_FTR_TLBIE_206		ASM_CONST(0x00400000)
 
+/* Enable use of TLB reservation.  Processor should support tlbsrx.
+ * instruction and MAS0[WQ].
+ */
+#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)
+
+/* Use paired MAS registers (MAS7||MAS3, etc.)
+ */
+#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)
+
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
@@ -73,6 +83,41 @@ extern void early_init_mmu_secondary(void);
 
 #endif /* !__ASSEMBLY__ */
 
+/* The kernel use the constants below to index in the page sizes array.
+ * The use of fixed constants for this purpose is better for performances
+ * of the low level hash refill handlers.
+ *
+ * A non supported page size has a "shift" field set to 0
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable
+ *
+ * Note: This array ended up being a false good idea as it's growing to the
+ * point where I wonder if we should replace it with something different,
+ * to think about, feedback welcome. --BenH.
+ */
+
+/* There are #define as they have to be used in assembly
+ *
+ * WARNING: If you change this list, make sure to update the array of
+ * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
+ * happen
+ */
+#define MMU_PAGE_4K	0
+#define MMU_PAGE_16K	1
+#define MMU_PAGE_64K	2
+#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
+#define MMU_PAGE_256K	4
+#define MMU_PAGE_1M	5
+#define MMU_PAGE_8M	6
+#define MMU_PAGE_16M	7
+#define MMU_PAGE_256M	8
+#define MMU_PAGE_1G	9
+#define MMU_PAGE_16G	10
+#define MMU_PAGE_64G	11
+#define MMU_PAGE_COUNT	12
+
 
 #if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
@@ -94,5 +139,6 @@ extern void early_init_mmu_secondary(void);
 # include <asm/mmu-8xx.h>
 #endif
 
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
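The MMU_PAGE_* constants are indices into a per-platform mmu_psize_defs[] array (declared for Book3E earlier in this series, and for hash64 in mmu-hash64.h). A minimal sketch of the intended lookup, assuming only the convention stated above that a zero shift marks an unsupported size:

static inline unsigned long mmu_page_size_sketch(int psize)
{
	if (psize < 0 || psize >= MMU_PAGE_COUNT)
		return 0;
	if (!mmu_psize_defs[psize].shift)
		return 0;	/* size not supported on this CPU */
	return 1UL << mmu_psize_defs[psize].shift;
}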
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b7063669f972..b34e94d94435 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -14,7 +14,6 @@
 /*
  * Most if the context management is out of line
  */
-extern void mmu_context_init(void);
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
@@ -23,6 +22,12 @@ extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 extern void set_context(unsigned long id, pgd_t *pgd);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void mmu_context_init(void) { }
+#else
+extern void mmu_context_init(void);
+#endif
+
 /*
  * switch_mm is the entry point called from the architecture independent
  * code in kernel/sched.c
@@ -38,6 +43,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	tsk->thread.pgdir = next->pgd;
 #endif /* CONFIG_PPC32 */
 
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = next->pgd;
+#endif
 	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;
@@ -84,6 +93,10 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = NULL;
+#endif
 }
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index efde5ac82f7b..6c587eddee59 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -107,6 +107,9 @@ extern void pmac_xpram_write(int xpaddr, u8 data);
 /* Synchronize NVRAM */
 extern void nvram_sync(void);
 
+/* Determine NVRAM size */
+extern ssize_t nvram_get_size(void);
+
 /* Normal access to NVRAM */
 extern unsigned char nvram_read_byte(int i);
 extern void nvram_write_byte(unsigned char c, int i);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c8a3cbfe02ff..7d8514ceceae 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -14,9 +14,11 @@
 #define _ASM_POWERPC_PACA_H
 #ifdef __KERNEL__
 
 #include <asm/types.h>
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/exception-64e.h>
 
 register struct paca_struct *local_paca asm("r13");
 
@@ -91,6 +93,21 @@ struct paca_struct {
 	u16 slb_cache[SLB_CACHE_ENTRIES];
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
+#ifdef CONFIG_PPC_BOOK3E
+	pgd_t *pgd;			/* Current PGD */
+	pgd_t *kernel_pgd;		/* Kernel PGD */
+	u64 exgen[8] __attribute__((aligned(0x80)));
+	u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80)));
+	u64 exmc[8];		/* used for machine checks */
+	u64 excrit[8];		/* used for crit interrupts */
+	u64 exdbg[8];		/* used for debug interrupts */
+
+	/* Kernel stack pointers for use by special exceptions */
+	void *mc_kstack;
+	void *crit_kstack;
+	void *dbg_kstack;
+#endif /* CONFIG_PPC_BOOK3E */
+
 	mm_context_t context;
 
 	/*
@@ -105,7 +122,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
+	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 4940662ee87e..ff24254990e1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -139,7 +139,11 @@ extern phys_addr_t kernstart_addr;
  * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
  * "kernelness", use is_kernel_addr() - it should do what you want.
  */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
+#else
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5817a3b747e5..3f17b83f55a1 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -135,12 +135,22 @@ extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 #endif /* __ASSEMBLY__ */
 #else
 #define slice_init()
+#ifdef CONFIG_PPC_STD_MMU_64
 #define get_slice_psize(mm, addr)	((mm)->context.user_psize)
 #define slice_set_user_psize(mm, psize)		\
 do {						\
 	(mm)->context.user_psize = (psize);	\
 	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
 } while (0)
+#else /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC_64K_PAGES
+#define get_slice_psize(mm, addr)	MMU_PAGE_64K
+#else /* CONFIG_PPC_64K_PAGES */
+#define get_slice_psize(mm, addr)	MMU_PAGE_4K
+#endif /* !CONFIG_PPC_64K_PAGES */
+#define slice_set_user_psize(mm, psize)	do { BUG(); } while(0)
+#endif /* !CONFIG_PPC_STD_MMU_64 */
+
 #define slice_set_range_psize(mm, start, len, psize)	\
 	slice_set_user_psize((mm), (psize))
 #define slice_mm_new_context(mm)	1
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 4c61fa0b8d75..76e1f313a58e 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -77,9 +77,7 @@ struct pci_controller {
 
 	int first_busno;
 	int last_busno;
-#ifndef CONFIG_PPC64
 	int self_busno;
-#endif
 
 	void __iomem *io_base_virt;
 #ifdef CONFIG_PPC64
@@ -104,7 +102,6 @@ struct pci_controller {
 	unsigned int __iomem *cfg_addr;
 	void __iomem *cfg_data;
 
-#ifndef CONFIG_PPC64
 	/*
 	 * Used for variants of PCI indirect handling and possible quirks:
 	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
@@ -128,7 +125,6 @@ struct pci_controller {
 #define PPC_INDIRECT_TYPE_BIG_ENDIAN	0x00000010
 #define PPC_INDIRECT_TYPE_BROKEN_MRM	0x00000020
 	u32 indirect_type;
-#endif	/* !CONFIG_PPC64 */
 	/* Currently, we limit ourselves to 1 IO range and 3 mem
 	 * ranges since the common pci_bus structure can't handle more
 	 */
@@ -146,21 +142,6 @@ struct pci_controller {
 #endif	/* CONFIG_PPC64 */
 };
 
-#ifndef CONFIG_PPC64
-
-static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
-{
-	return bus->sysdata;
-}
-
-static inline int isa_vaddr_is_ioport(void __iomem *address)
-{
-	/* No specific ISA handling on ppc32 at this stage, it
-	 * all goes through PCI
-	 */
-	return 0;
-}
-
 /* These are used for config access before all the PCI probing
    has been done. */
 extern int early_read_config_byte(struct pci_controller *hose, int bus,
@@ -182,6 +163,22 @@ extern int early_find_capability(struct pci_controller *hose, int bus,
 extern void setup_indirect_pci(struct pci_controller* hose,
 			       resource_size_t cfg_addr,
 			       resource_size_t cfg_data, u32 flags);
+
+#ifndef CONFIG_PPC64
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+	/* No specific ISA handling on ppc32 at this stage, it
+	 * all goes through PCI
+	 */
+	return 0;
+}
+
 #else	/* CONFIG_PPC64 */
 
 /*
@@ -284,11 +281,6 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
 extern int pcibios_unmap_io_space(struct pci_bus *bus);
 extern int pcibios_map_io_space(struct pci_bus *bus);
 
-/* Return values for ppc_md.pci_probe_mode function */
-#define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
-#define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
-#define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
-
 #ifdef CONFIG_NUMA
 #define PHB_SET_NODE(PHB, NODE)		((PHB)->node = (NODE))
 #else
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index d9483c504d2d..b5ea626eea2d 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -22,6 +22,11 @@
 
 #include <asm-generic/pci-dma-compat.h>
 
+/* Return values for ppc_md.pci_probe_mode function */
+#define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
+
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
@@ -40,7 +45,6 @@ struct pci_dev;
  */
 #define pcibios_assign_all_busses() \
 	(ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
-#define pcibios_scan_all_fns(a, b)	0
 
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
@@ -61,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
+extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+extern struct dma_map_ops *get_pci_dma_ops(void);
 #else	/* CONFIG_PCI */
 #define set_pci_dma_ops(d)
 #define get_pci_dma_ops()	NULL
@@ -228,6 +232,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
 
 extern void pcibios_setup_bus_devices(struct pci_bus *bus);
 extern void pcibios_setup_bus_self(struct pci_bus *bus);
+extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
+extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata);
 
 #endif	/* __KERNEL__ */
 #endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h
index 0ea0639fcf75..3288ce3997e0 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,5 +1,5 @@
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -12,7 +12,7 @@
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS		8
+#define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
 #define MAX_LIMITED_HWCOUNTERS	2
 
@@ -28,12 +28,12 @@ struct power_pmu {
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event, unsigned long *mskp,
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
-	int		(*get_alternatives)(u64 event, unsigned int flags,
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event);
+	int		(*limited_pmc_event)(u64 event_id);
 	u32		flags;
 	int		n_generic;
 	int		*generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET	1
+#define PERF_EVENT_INDEX_OFFSET	1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
  */
 #ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
 /*
  * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
  * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event.  For a
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
- * for the event's class set.  The test_adder has the least significant
+ * for the event_id's class set.  The test_adder has the least significant
 * bit set in the field.
 *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
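The select-field rule described above reduces to a simple bit test: two event_ids may be scheduled together only if, wherever both constrain the same field, their required values agree. A sketch of that check (illustration only; the actual scheduler folds this into its value/mask accumulation rather than comparing pairs):

static int select_fields_agree_sketch(unsigned long val1, unsigned long msk1,
				      unsigned long val2, unsigned long msk2)
{
	/* bits constrained by both events must be requested identically */
	return ((val1 ^ val2) & msk1 & msk2) == 0;
}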
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 1730e5e298d6..f2e812de7c3c 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -4,6 +4,15 @@
 
 #include <linux/mm.h>
 
+#ifdef CONFIG_PPC_BOOK3E
+extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
+#else /* CONFIG_PPC_BOOK3E */
+static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
+				     unsigned long address)
+{
+}
+#endif /* !CONFIG_PPC_BOOK3E */
+
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 	free_page((unsigned long)pte);
@@ -19,7 +28,12 @@ typedef struct pgtable_free {
 	unsigned long val;
 } pgtable_free_t;
 
-#define PGF_CACHENUM_MASK	0x7
+/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
+ * and small enough to fit in the low bits of any naturally aligned page
+ * table cache entry. Arbitrarily set to 0x1f, that should give us some
+ * room to grow
+ */
+#define PGF_CACHENUM_MASK	0x1f
 
 static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 						unsigned long mask)
@@ -35,19 +49,27 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 #include <asm/pgalloc-32.h>
 #endif
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage,address)		\
-do { \
-	pgtable_page_dtor(ptepage); \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
-#else
-#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
-#endif
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pte_free_finish(void);
+#else /* CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+	pgtable_free(pgf);
+}
+static inline void pte_free_finish(void) { }
+#endif /* !CONFIG_SMP */
 
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+				  unsigned long address)
+{
+	pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
+						PTE_NONCACHE_NUM,
+						PTE_TABLE_SIZE-1);
+	tlb_flush_pgtable(tlb, address);
+	pgtable_page_dtor(ptepage);
+	pgtable_free_tlb(tlb, pgf);
+}
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */
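The PGF_CACHENUM_MASK comment above leans on page tables being naturally aligned, so the cache number can ride in the low bits of the pointer stored in pgtable_free_t. A hedged sketch of that encoding (pgtable_free_cache() itself also takes a size mask, as its prototype shows):

static inline pgtable_free_t pgf_pack_sketch(void *table, int cachenum)
{
	/* table is naturally aligned, so its low 5 bits are known zero */
	pgtable_free_t pgf = { .val = (unsigned long)table | cachenum };
	return pgf;
}

static inline void *pgf_table_sketch(pgtable_free_t pgf)
{
	return (void *)(pgf.val & ~PGF_CACHENUM_MASK);
}

static inline int pgf_cachenum_sketch(pgtable_free_t pgf)
{
	return pgf.val & PGF_CACHENUM_MASK;
}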
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index c9ff9d75990e..55646adfa843 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -111,6 +111,8 @@ extern int icache_44x_need_flush;
111#include <asm/pte-40x.h> 111#include <asm/pte-40x.h>
112#elif defined(CONFIG_44x) 112#elif defined(CONFIG_44x)
113#include <asm/pte-44x.h> 113#include <asm/pte-44x.h>
114#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
115#include <asm/pte-book3e.h>
114#elif defined(CONFIG_FSL_BOOKE) 116#elif defined(CONFIG_FSL_BOOKE)
115#include <asm/pte-fsl-booke.h> 117#include <asm/pte-fsl-booke.h>
116#elif defined(CONFIG_8xx) 118#elif defined(CONFIG_8xx)
@@ -186,7 +188,7 @@ static inline unsigned long pte_update(pte_t *p,
186#endif /* !PTE_ATOMIC_UPDATES */ 188#endif /* !PTE_ATOMIC_UPDATES */
187 189
188#ifdef CONFIG_44x 190#ifdef CONFIG_44x
189 if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) 191 if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
190 icache_44x_need_flush = 1; 192 icache_44x_need_flush = 1;
191#endif 193#endif
192 return old; 194 return old;
@@ -217,7 +219,7 @@ static inline unsigned long long pte_update(pte_t *p,
217#endif /* !PTE_ATOMIC_UPDATES */ 219#endif /* !PTE_ATOMIC_UPDATES */
218 220
219#ifdef CONFIG_44x 221#ifdef CONFIG_44x
220 if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) 222 if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
221 icache_44x_need_flush = 1; 223 icache_44x_need_flush = 1;
222#endif 224#endif
223 return old; 225 return old;
@@ -267,8 +269,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
267static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 269static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
268{ 270{
269 unsigned long bits = pte_val(entry) & 271 unsigned long bits = pte_val(entry) &
270 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | 272 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
271 _PAGE_HWEXEC | _PAGE_EXEC);
272 pte_update(ptep, 0, bits); 273 pte_update(ptep, 0, bits);
273} 274}
274 275
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 6cc085b945a5..90533ddcd703 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -10,10 +10,10 @@
 #define PGD_INDEX_SIZE  4
 
 #ifndef __ASSEMBLY__
-
 #define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
 #define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
@@ -32,8 +32,6 @@
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#endif	/* __ASSEMBLY__ */
-
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0x1ff
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8cd083c61503..806abe7a3fa5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -5,11 +5,6 @@
  * the ppc64 hashed page table.
  */
 
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pgtable-ppc64-64k.h>
 #else
@@ -38,26 +33,47 @@
 #endif
 
 /*
- * Define the address range of the vmalloc VM area.
+ * Define the address range of the kernel non-linear virtual area
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#else
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#endif
+#define KERN_VIRT_SIZE	PGTABLE_RANGE
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
  */
-#define VMALLOC_START ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE  (PGTABLE_RANGE >> 1)
-#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
+#define VMALLOC_START	KERN_VIRT_START
+#ifdef CONFIG_PPC_BOOK3E
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
+#else
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
+#endif
+#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Define the address ranges for MMIO and IO space :
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
  *
- *  ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
  *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
  *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
  */
+#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
 #define FULL_IO_SIZE	0x80000000ul
-#define  ISA_IO_BASE	(VMALLOC_END)
-#define  ISA_IO_END	(VMALLOC_END + 0x10000ul)
+#define  ISA_IO_BASE	(KERN_IO_START)
+#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
 #define  PHB_IO_BASE	(ISA_IO_END)
-#define  PHB_IO_END	(VMALLOC_END + FULL_IO_SIZE)
+#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
-#define IOREMAP_END	(VMALLOC_START + PGTABLE_RANGE)
+#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
+
 
 /*
  * Region IDs
@@ -68,23 +84,32 @@
 
 #define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID	(0xfUL)
+#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
 #define USER_REGION_ID		(0UL)
 
 /*
- * Defines the address of the vmemap area, in its own region
+ * Defines the address of the vmemap area, in its own region on
+ * hash table CPUs and after the vmalloc space on Book3E
  */
+#ifdef CONFIG_PPC_BOOK3E
+#define VMEMMAP_BASE		VMALLOC_END
+#define VMEMMAP_END		KERN_IO_START
+#else
 #define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
+#endif
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 
 /*
  * Include the PTE bits definitions
  */
+#ifdef CONFIG_PPC_BOOK3S
 #include <asm/pte-hash64.h>
+#else
+#include <asm/pte-book3e.h>
+#endif
 #include <asm/pte-common.h>
 
-
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -92,6 +117,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -285,8 +313,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
-		 _PAGE_EXEC | _PAGE_HWEXEC);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
 #ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
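To make the carve-up above concrete, here is a small standalone check of the Book3E case, assuming a 1TB PGTABLE_RANGE purely for illustration (the real value depends on the configured page-table geometry):

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x8000000000000000ull; /* KERN_VIRT_START */
	unsigned long long size  = 1ull << 40;            /* assumed range   */

	unsigned long long vmalloc_end = start + (size >> 2); /* quarter     */
	unsigned long long io_start    = start + (size >> 1); /* second half */

	printf("vmalloc : %#llx - %#llx\n", start, vmalloc_end);
	printf("vmemmap : %#llx - %#llx\n", vmalloc_end, io_start);
	printf("IO      : %#llx - %#llx\n", io_start, start + size);
	return 0;
}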
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
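A sketch of the "two halves in the right order" store mentioned above, for the 32-bit big-endian case with 64-bit PTEs (illustration only; the kernel does this in inline assembly): the word holding _PAGE_PRESENT is written last, behind an eieio, so a concurrent TLB fill never observes a half-written but valid-looking PTE.

static inline void set_pte_two_halves_sketch(unsigned long long *ptep,
					     unsigned long long pte)
{
	unsigned int *p = (unsigned int *)ptep;

	p[0] = pte >> 32;			/* top half, no valid bit   */
	asm volatile("eieio" : : : "memory");	/* order the two stores     */
	p[1] = (unsigned int)pte;		/* low half, _PAGE_PRESENT  */
}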
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index d6a616a1b3ea..ccc68b50d05d 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
 
 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
 
 #ifdef CONFIG_PPC64
-void power4_enable_pmcs(void);
-void pasemi_enable_pmcs(void);
+#include <asm/lppaca.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+	get_lppaca()->pmcregs_in_use = inuse;
+}
+
+extern void power4_enable_pmcs(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index b74f16d45cb4..ef9aa84cac5a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -48,6 +48,8 @@
 #define PPC_INST_TLBIE			0x7c000264
 #define PPC_INST_TLBILX			0x7c000024
 #define PPC_INST_WAIT			0x7c00007c
+#define PPC_INST_TLBIVAX		0x7c000624
+#define PPC_INST_TLBSRX_DOT		0x7c0006a5
 
 /* macros to insert fields into opcodes */
 #define __PPC_RA(a)	(((a) & 0x1f) << 16)
@@ -76,6 +78,10 @@
 					__PPC_WC(w))
 #define PPC_TLBIE(lp,a) 	stringify_in_c(.long PPC_INST_TLBIE | \
 					       __PPC_RB(a) | __PPC_RS(lp))
+#define PPC_TLBSRX_DOT(a,b)	stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
+					       __PPC_RA(a) | __PPC_RB(b))
+#define PPC_TLBIVAX(a,b)	stringify_in_c(.long PPC_INST_TLBIVAX | \
+					       __PPC_RA(a) | __PPC_RB(b))
 
 /*
  * Define what the VSX XX1 form instructions will look like, then add
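A worked example of what these stringified encodings expand to, recomputed as plain C (the __PPC_RB shift of 11 matches the standard RB field position but its definition sits outside this hunk, so treat it as an assumption):

#include <stdio.h>

#define PPC_INST_TLBSRX_DOT	0x7c0006a5
#define __PPC_RA(a)	(((a) & 0x1f) << 16)
#define __PPC_RB(b)	(((b) & 0x1f) << 11)

int main(void)
{
	/* the raw word PPC_TLBSRX_DOT(10, 11) emits: tlbsrx. r10,r11 */
	unsigned int insn = PPC_INST_TLBSRX_DOT | __PPC_RA(10) | __PPC_RB(11);
	printf("%#010x\n", insn);	/* prints 0x7c0a5ea5 */
	return 0;
}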
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 854ab713f56c..2828f9d0f66d 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -39,7 +39,6 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
39 39
40extern void pci_devs_phb_init(void); 40extern void pci_devs_phb_init(void);
41extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); 41extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
42extern void scan_phb(struct pci_controller *hose);
43 42
44/* From rtas_pci.h */ 43/* From rtas_pci.h */
45extern void init_pci_config_tokens (void); 44extern void init_pci_config_tokens (void);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f9729529c20d..498fe09263d3 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,13 +98,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
 #define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
 #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
-#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); stvx n,b,base
+#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); stvx n,base,b
 #define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
 #define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
 #define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
 #define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
 #define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,b,base
+#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,base,b
 #define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
 #define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
 #define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
@@ -112,26 +112,26 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR);				\
 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,b,base)
+#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b)
 #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
+#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b)
 #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
 #define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
 #define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
 /* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,b,base)
+#define SAVE_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b)
 #define SAVE_2VSRSU(n,b,base)	SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
 #define SAVE_4VSRSU(n,b,base)	SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
 #define SAVE_8VSRSU(n,b,base)	SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
 #define SAVE_16VSRSU(n,b,base)	SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
 #define SAVE_32VSRSU(n,b,base)	SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
+#define REST_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b)
 #define REST_2VSRSU(n,b,base)	REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
 #define REST_4VSRSU(n,b,base)	REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
 #define REST_8VSRSU(n,b,base)	REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
@@ -375,8 +375,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define PPC440EP_ERR42
 #endif
 
-
-#if defined(CONFIG_BOOKE)
+/*
+ * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
+ * keep the address intact to be compatible with code shared with
+ * 32-bit classic.
+ *
+ * On the other hand, I find it useful to have them behave as expected
+ * by their name (ie always do the addition) on 64-bit BookE
+ */
+#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
 #define toreal(rd)
 #define fromreal(rd)
 
@@ -426,10 +433,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 	.previous
 #endif
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
 #define RFI		rfid
 #define MTMSRD(r)	mtmsrd	r
-
 #else
 #define FIX_SRR1(ra, rb)
 #ifndef CONFIG_40x
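Background on the operand swap in the SAVE/REST macros above (rationale inferred, not stated in the patch): lvx, stvx, lxvd2x and stxvd2x are X-form instructions whose effective address is (RA|0) + (RB), meaning RA reads as literal zero when it names r0. Keeping the li-loaded offset register in the RB slot is presumably what keeps the macros correct when a caller passes r0 as the scratch register b. A compilable illustration of the addressing mode:

static inline void stvx_indexed_store_sketch(void *base)
{
	/* EA = base + r0; the "b" constraint keeps base itself out of r0 */
	asm volatile("li 0,16\n\t"
		     "stvx 1,%0,0"
		     : : "b"(base) : "r0", "memory");
}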
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
index 07630faae029..6c3e1f4378d4 100644
--- a/arch/powerpc/include/asm/pte-40x.h
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -46,7 +46,7 @@
 #define	_PAGE_RW	0x040	/* software: Writes permitted */
 #define	_PAGE_DIRTY	0x080	/* software: dirty page */
 #define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
-#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
+#define _PAGE_EXEC	0x200	/* hardware: EX permission */
 #define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
 
 #define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h
index 37e98bcf83e0..4192b9bad901 100644
--- a/arch/powerpc/include/asm/pte-44x.h
+++ b/arch/powerpc/include/asm/pte-44x.h
@@ -78,7 +78,7 @@
 #define _PAGE_PRESENT	0x00000001		/* S: PTE valid */
 #define _PAGE_RW	0x00000002		/* S: Write permission */
 #define _PAGE_FILE	0x00000004		/* S: nonlinear file mapping */
-#define _PAGE_HWEXEC	0x00000004		/* H: Execute permission */
+#define _PAGE_EXEC	0x00000004		/* H: Execute permission */
 #define _PAGE_ACCESSED	0x00000008		/* S: Page referenced */
 #define _PAGE_DIRTY	0x00000010		/* S: Page dirty */
 #define _PAGE_SPECIAL	0x00000020		/* S: Special page */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index 8c6e31251034..94e979718dcf 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -36,7 +36,6 @@
 /* These five software bits must be masked out when the entry is loaded
  * into the TLB.
  */
-#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
 #define _PAGE_GUARDED	0x0010	/* software: guarded access */
 #define _PAGE_DIRTY	0x0020	/* software: page changed */
 #define _PAGE_RW	0x0040	/* software: user write access allowed */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
new file mode 100644
index 000000000000..082d515930a2
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -0,0 +1,84 @@
1#ifndef _ASM_POWERPC_PTE_BOOK3E_H
2#define _ASM_POWERPC_PTE_BOOK3E_H
3#ifdef __KERNEL__
4
5/* PTE bit definitions for processors compliant to the Book3E
6 * architecture 2.06 or later. The position of the PTE bits
7 * matches the HW definition of the optional Embedded Page Table
8 * category.
9 */
10
11/* Architected bits */
12#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
13#define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */
14#define _PAGE_SW1 0x000002
15#define _PAGE_BAP_SR 0x000004
16#define _PAGE_BAP_UR 0x000008
17#define _PAGE_BAP_SW 0x000010
18#define _PAGE_BAP_UW 0x000020
19#define _PAGE_BAP_SX 0x000040
20#define _PAGE_BAP_UX 0x000080
21#define _PAGE_PSIZE_MSK 0x000f00
22#define _PAGE_PSIZE_4K 0x000200
23#define _PAGE_PSIZE_8K 0x000300
24#define _PAGE_PSIZE_16K 0x000400
25#define _PAGE_PSIZE_32K 0x000500
26#define _PAGE_PSIZE_64K 0x000600
27#define _PAGE_PSIZE_128K 0x000700
28#define _PAGE_PSIZE_256K 0x000800
29#define _PAGE_PSIZE_512K 0x000900
30#define _PAGE_PSIZE_1M 0x000a00
31#define _PAGE_PSIZE_2M 0x000b00
32#define _PAGE_PSIZE_4M 0x000c00
33#define _PAGE_PSIZE_8M 0x000d00
34#define _PAGE_PSIZE_16M 0x000e00
35#define _PAGE_PSIZE_32M 0x000f00
36#define _PAGE_DIRTY 0x001000 /* C: page changed */
37#define _PAGE_SW0 0x002000
38#define _PAGE_U3 0x004000
39#define _PAGE_U2 0x008000
40#define _PAGE_U1 0x010000
41#define _PAGE_U0 0x020000
42#define _PAGE_ACCESSED 0x040000
43#define _PAGE_LENDIAN 0x080000
44#define _PAGE_GUARDED 0x100000
45#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
46#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
47#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
48
49/* "Higher level" linux bit combinations */
50#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */
51#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
52#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
53#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
54#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
55#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
56#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
57
58#define _PAGE_HASHPTE 0
59#define _PAGE_BUSY 0
60
61#define _PAGE_SPECIAL _PAGE_SW0
62
63/* Flags to be preserved on PTE modifications */
64#define _PAGE_HPTEFLAGS _PAGE_BUSY
65
66/* Base page size */
67#ifdef CONFIG_PPC_64K_PAGES
68#define _PAGE_PSIZE _PAGE_PSIZE_64K
69#define PTE_RPN_SHIFT (28)
70#else
71#define _PAGE_PSIZE _PAGE_PSIZE_4K
72#define PTE_RPN_SHIFT (24)
73#endif
74
75/* On 32-bit, we never clear the top part of the PTE */
76#ifdef CONFIG_PPC32
77#define _PTE_NONE_MASK 0xffffffff00000000ULL
78#define _PMD_PRESENT 0
79#define _PMD_PRESENT_MASK (PAGE_MASK)
80#define _PMD_BAD (~PAGE_MASK)
81#endif
82
83#endif /* __KERNEL__ */
84#endif /* _ASM_POWERPC_PTE_BOOK3E_H */
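
The Linux-level combinations in this new header are pure ORs of the architected BAP (base access permission) bits. A minimal userspace sketch (not kernel code; the constants are copied from the hunk above) showing that a kernel RWX mapping grants supervisor execute but no user access:

#include <stdio.h>

/* Bit values copied from pte-book3e.h above. */
#define _PAGE_BAP_SR 0x000004
#define _PAGE_BAP_UR 0x000008
#define _PAGE_BAP_SW 0x000010
#define _PAGE_BAP_UW 0x000020
#define _PAGE_BAP_SX 0x000040
#define _PAGE_BAP_UX 0x000080
#define _PAGE_DIRTY  0x001000

#define _PAGE_RW         (_PAGE_BAP_SW | _PAGE_BAP_UW)
#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
#define _PAGE_USER       (_PAGE_BAP_UR | _PAGE_BAP_SR)

int main(void)
{
	unsigned long pte = _PAGE_KERNEL_RWX;

	/* Supervisor execute is granted, user access is not. */
	printf("SX=%d UX=%d UR=%d\n",
	       !!(pte & _PAGE_BAP_SX),
	       !!(pte & _PAGE_BAP_UX),
	       !!(pte & _PAGE_BAP_UR));
	return 0;
}
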
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index a7e210b6b48c..c3b65076a263 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -13,9 +13,6 @@
13#ifndef _PAGE_HWWRITE 13#ifndef _PAGE_HWWRITE
14#define _PAGE_HWWRITE 0 14#define _PAGE_HWWRITE 0
15#endif 15#endif
16#ifndef _PAGE_HWEXEC
17#define _PAGE_HWEXEC 0
18#endif
19#ifndef _PAGE_EXEC 16#ifndef _PAGE_EXEC
20#define _PAGE_EXEC 0 17#define _PAGE_EXEC 0
21#endif 18#endif
@@ -34,6 +31,9 @@
34#ifndef _PAGE_4K_PFN 31#ifndef _PAGE_4K_PFN
35#define _PAGE_4K_PFN 0 32#define _PAGE_4K_PFN 0
36#endif 33#endif
34#ifndef _PAGE_SAO
35#define _PAGE_SAO 0
36#endif
37#ifndef _PAGE_PSIZE 37#ifndef _PAGE_PSIZE
38#define _PAGE_PSIZE 0 38#define _PAGE_PSIZE 0
39#endif 39#endif
@@ -45,10 +45,16 @@
45#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() 45#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
46#endif 46#endif
47#ifndef _PAGE_KERNEL_RO 47#ifndef _PAGE_KERNEL_RO
48#define _PAGE_KERNEL_RO 0 48#define _PAGE_KERNEL_RO 0
49#endif
50#ifndef _PAGE_KERNEL_ROX
51#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
49#endif 52#endif
50#ifndef _PAGE_KERNEL_RW 53#ifndef _PAGE_KERNEL_RW
51#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) 54#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
55#endif
56#ifndef _PAGE_KERNEL_RWX
57#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
52#endif 58#endif
53#ifndef _PAGE_HPTEFLAGS 59#ifndef _PAGE_HPTEFLAGS
54#define _PAGE_HPTEFLAGS _PAGE_HASHPTE 60#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -93,8 +99,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
93#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ 99#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
94 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \ 100 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
95 _PAGE_USER | _PAGE_ACCESSED | \ 101 _PAGE_USER | _PAGE_ACCESSED | \
96 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ 102 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
97 _PAGE_EXEC | _PAGE_HWEXEC)
98 103
99/* 104/*
100 * We define 2 sets of base prot bits, one for basic pages (ie, 105 * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -151,11 +156,9 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
151 _PAGE_NO_CACHE) 156 _PAGE_NO_CACHE)
152#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ 157#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
153 _PAGE_NO_CACHE | _PAGE_GUARDED) 158 _PAGE_NO_CACHE | _PAGE_GUARDED)
154#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC | \ 159#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
155 _PAGE_HWEXEC)
156#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) 160#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
157#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC | \ 161#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
158 _PAGE_HWEXEC)
159 162
160/* Protection used for kernel text. We want the debuggers to be able to 163/* Protection used for kernel text. We want the debuggers to be able to
161 * set breakpoints anywhere, so don't write protect the kernel text 164 * set breakpoints anywhere, so don't write protect the kernel text
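
These hunks extend pte-common.h's #ifndef-default idiom: each pte-*.h variant defines only the bits its MMU actually has, and pte-common.h derives or zeroes the rest. A self-contained sketch of the idiom (bit values borrowed from pte-fsl-booke.h below):

#include <stdio.h>

/* A variant header defines only what its MMU provides: */
#define _PAGE_RW    0x00004
#define _PAGE_DIRTY 0x00008
#define _PAGE_EXEC  0x00010

/* pte-common.h-style defaults fill in everything left undefined: */
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE 0
#endif
#ifndef _PAGE_KERNEL_RW
#define _PAGE_KERNEL_RW  (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
#endif
#ifndef _PAGE_KERNEL_RWX
#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
#endif

int main(void)
{
	printf("KERNEL_RW  = %#x\n", (unsigned)_PAGE_KERNEL_RW);
	printf("KERNEL_RWX = %#x\n", (unsigned)_PAGE_KERNEL_RWX);
	return 0;
}
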
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index 10820f58acf5..2c12be5f677a 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -23,7 +23,7 @@
23#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ 23#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
24#define _PAGE_RW 0x00004 /* S: Write permission (SW) */ 24#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
25#define _PAGE_DIRTY 0x00008 /* S: Page dirty */ 25#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
26#define _PAGE_HWEXEC 0x00010 /* H: SX permission */ 26#define _PAGE_EXEC 0x00010 /* H: SX permission */
27#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ 27#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
28 28
29#define _PAGE_ENDIAN 0x00040 /* H: E bit */ 29#define _PAGE_ENDIAN 0x00040 /* H: E bit */
@@ -33,13 +33,6 @@
33#define _PAGE_WRITETHRU 0x00400 /* H: W bit */ 33#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
34#define _PAGE_SPECIAL 0x00800 /* S: Special page */ 34#define _PAGE_SPECIAL 0x00800 /* S: Special page */
35 35
36#ifdef CONFIG_PTE_64BIT
37/* ERPN in a PTE never gets cleared, ignore it */
38#define _PTE_NONE_MASK 0xffffffffffff0000ULL
39/* We extend the size of the PTE flags area when using 64-bit PTEs */
40#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)
41#endif
42
43#define _PMD_PRESENT 0 36#define _PMD_PRESENT 0
44#define _PMD_PRESENT_MASK (PAGE_MASK) 37#define _PMD_PRESENT_MASK (PAGE_MASK)
45#define _PMD_BAD (~PAGE_MASK) 38#define _PMD_BAD (~PAGE_MASK)
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
index 16e571c7f9ef..4aad4132d0a8 100644
--- a/arch/powerpc/include/asm/pte-hash32.h
+++ b/arch/powerpc/include/asm/pte-hash32.h
@@ -26,7 +26,6 @@
26#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ 26#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
27#define _PAGE_DIRTY 0x080 /* C: page changed */ 27#define _PAGE_DIRTY 0x080 /* C: page changed */
28#define _PAGE_ACCESSED 0x100 /* R: page referenced */ 28#define _PAGE_ACCESSED 0x100 /* R: page referenced */
29#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
30#define _PAGE_RW 0x400 /* software: user write access allowed */ 29#define _PAGE_RW 0x400 /* software: user write access allowed */
31#define _PAGE_SPECIAL 0x800 /* software: Special page */ 30#define _PAGE_SPECIAL 0x800 /* software: Special page */
32 31
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 157c5ca581c8..f388f0ab193f 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -154,6 +154,7 @@ int qe_get_snum(void);
154void qe_put_snum(u8 snum); 154void qe_put_snum(u8 snum);
155unsigned int qe_get_num_of_risc(void); 155unsigned int qe_get_num_of_risc(void);
156unsigned int qe_get_num_of_snums(void); 156unsigned int qe_get_num_of_snums(void);
157int qe_alive_during_sleep(void);
157 158
158/* we actually use cpm_muram implementation, define this for convenience */ 159/* we actually use cpm_muram implementation, define this for convenience */
159#define qe_muram_init cpm_muram_init 160#define qe_muram_init cpm_muram_init
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1170267736d3..6315edc205d8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -98,19 +98,15 @@
98#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */ 98#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
99#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ 99#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
100 100
101#ifdef CONFIG_PPC64 101#if defined(CONFIG_PPC_BOOK3S_64)
102/* Server variant */
102#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF | MSR_HV 103#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF | MSR_HV
103#define MSR_KERNEL MSR_ | MSR_SF 104#define MSR_KERNEL MSR_ | MSR_SF
104
105#define MSR_USER32 MSR_ | MSR_PR | MSR_EE 105#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
106#define MSR_USER64 MSR_USER32 | MSR_SF 106#define MSR_USER64 MSR_USER32 | MSR_SF
107 107#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
108#else /* 32-bit */
109/* Default MSR for kernel mode. */ 108/* Default MSR for kernel mode. */
110#ifndef MSR_KERNEL /* reg_booke.h also defines this */
111#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) 109#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
112#endif
113
114#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) 110#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
115#endif 111#endif
116 112
@@ -646,6 +642,137 @@
646#endif 642#endif
647 643
648/* 644/*
645 * SPRG usage:
646 *
647 * All 64-bit:
648 * - SPRG1 stores PACA pointer
649 *
650 * 64-bit server:
651 * - SPRG0 unused (reserved for HV on Power4)
652 * - SPRG2 scratch for exception vectors
653 * - SPRG3 unused (user visible)
654 *
655 * 64-bit embedded:
656 * - SPRG0 generic exception scratch
657 * - SPRG2 TLB exception stack
658 * - SPRG3 unused (user visible)
659 * - SPRG4 unused (user visible)
660 * - SPRG6 TLB miss scratch (user visible, sorry !)
661 * - SPRG7 critical exception scratch
662 * - SPRG8 machine check exception scratch
663 * - SPRG9 debug exception scratch
664 *
665 * All 32-bit:
666 * - SPRG3 current thread_info pointer
667 * (virtual on BookE, physical on others)
668 *
669 * 32-bit classic:
670 * - SPRG0 scratch for exception vectors
671 * - SPRG1 scratch for exception vectors
672 * - SPRG2 indicator that we are in RTAS
673 * - SPRG4 (603 only) pseudo TLB LRU data
674 *
675 * 32-bit 40x:
676 * - SPRG0 scratch for exception vectors
677 * - SPRG1 scratch for exception vectors
678 * - SPRG2 scratch for exception vectors
679 * - SPRG4 scratch for exception vectors (not 403)
680 * - SPRG5 scratch for exception vectors (not 403)
681 * - SPRG6 scratch for exception vectors (not 403)
682 * - SPRG7 scratch for exception vectors (not 403)
683 *
684 * 32-bit 440 and FSL BookE:
685 * - SPRG0 scratch for exception vectors
686 * - SPRG1 scratch for exception vectors (*)
687 * - SPRG2 scratch for crit interrupts handler
688 * - SPRG4 scratch for exception vectors
689 * - SPRG5 scratch for exception vectors
690 * - SPRG6 scratch for machine check handler
691 * - SPRG7 scratch for exception vectors
692 * - SPRG9 scratch for debug vectors (e500 only)
693 *
694 * Additionally, BookE separates "read" and "write"
695 * of those registers. That allows the userspace-readable
696 * variant to be used for reads, which can avoid a fault
697 * with KVM-type virtualization.
698 *
699 * (*) Under KVM, the host SPRG1 is used to point to
700 * the current VCPU data structure
701 *
702 * 32-bit 8xx:
703 * - SPRG0 scratch for exception vectors
704 * - SPRG1 scratch for exception vectors
705 * - SPRG2 apparently unused but initialized
706 *
707 */
708#ifdef CONFIG_PPC64
709#define SPRN_SPRG_PACA SPRN_SPRG1
710#else
711#define SPRN_SPRG_THREAD SPRN_SPRG3
712#endif
713
714#ifdef CONFIG_PPC_BOOK3S_64
715#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
716#endif
717
718#ifdef CONFIG_PPC_BOOK3E_64
719#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
720#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7
721#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
722#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
723#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
724#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
725#endif
726
727#ifdef CONFIG_PPC_BOOK3S_32
728#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
729#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
730#define SPRN_SPRG_RTAS SPRN_SPRG2
731#define SPRN_SPRG_603_LRU SPRN_SPRG4
732#endif
733
734#ifdef CONFIG_40x
735#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
736#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
737#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
738#define SPRN_SPRG_SCRATCH3 SPRN_SPRG4
739#define SPRN_SPRG_SCRATCH4 SPRN_SPRG5
740#define SPRN_SPRG_SCRATCH5 SPRN_SPRG6
741#define SPRN_SPRG_SCRATCH6 SPRN_SPRG7
742#endif
743
744#ifdef CONFIG_BOOKE
745#define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0
746#define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0
747#define SPRN_SPRG_RSCRATCH1 SPRN_SPRG1
748#define SPRN_SPRG_WSCRATCH1 SPRN_SPRG1
749#define SPRN_SPRG_RSCRATCH_CRIT SPRN_SPRG2
750#define SPRN_SPRG_WSCRATCH_CRIT SPRN_SPRG2
751#define SPRN_SPRG_RSCRATCH2 SPRN_SPRG4R
752#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
753#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
754#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
755#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R
756#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W
757#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
758#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
759#ifdef CONFIG_E200
760#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R
761#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W
762#else
763#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
764#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
765#endif
766#define SPRN_SPRG_RVCPU SPRN_SPRG1
767#define SPRN_SPRG_WVCPU SPRN_SPRG1
768#endif
769
770#ifdef CONFIG_8xx
771#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
772#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
773#endif
774
775/*
649 * An mtfsf instruction with the L bit set. On CPUs that support this a 776 * An mtfsf instruction with the L bit set. On CPUs that support this a
650 * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored. 777 * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored.
651 * 778 *
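
The point of the SPRG table above is that exception code names each register by role and lets the header choose the physical SPRG per platform. A hedged sketch of that indirection (the SPR numbers 0x110..0x113 for SPRG0..SPRG3 follow the conventional numbering and are stated here as an assumption):

#include <stdio.h>

#define SPRN_SPRG0 0x110
#define SPRN_SPRG1 0x111
#define SPRN_SPRG2 0x112
#define SPRN_SPRG3 0x113

#ifdef CONFIG_PPC_BOOK3S_64
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2	/* server: SPRG2 is the scratch */
#else
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0	/* classic 32-bit: SPRG0 */
#endif

int main(void)
{
	/* Exception entry would say "mtspr SPRN_SPRG_SCRATCH0,r10";
	 * only this header knows which physical SPRG that resolves to. */
	printf("SCRATCH0 -> SPR %#x\n", SPRN_SPRG_SCRATCH0);
	return 0;
}
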
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 6bcf364cbb2f..3bf783505528 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -18,18 +18,26 @@
18#define MSR_IS MSR_IR /* Instruction Space */ 18#define MSR_IS MSR_IR /* Instruction Space */
19#define MSR_DS MSR_DR /* Data Space */ 19#define MSR_DS MSR_DR /* Data Space */
20#define MSR_PMM (1<<2) /* Performance monitor mark bit */ 20#define MSR_PMM (1<<2) /* Performance monitor mark bit */
21#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
21 22
22/* Default MSR for kernel mode. */ 23#if defined(CONFIG_PPC_BOOK3E_64)
23#if defined (CONFIG_40x) 24#define MSR_ MSR_ME | MSR_CE
25#define MSR_KERNEL MSR_ | MSR_CM
26#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
27#define MSR_USER64 MSR_USER32 | MSR_CM
28#elif defined (CONFIG_40x)
24#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) 29#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
25#elif defined(CONFIG_BOOKE) 30#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
31#else
26#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE) 32#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE)
33#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
27#endif 34#endif
28 35
29/* Special Purpose Registers (SPRNs)*/ 36/* Special Purpose Registers (SPRNs)*/
30#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */ 37#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */
31#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */ 38#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */
32#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */ 39#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */
40#define SPRN_SPRG3R 0x103 /* Special Purpose Register General 3 Read */
33#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */ 41#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */
34#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */ 42#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */
35#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */ 43#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */
@@ -38,11 +46,18 @@
38#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */ 46#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */
39#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */ 47#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */
40#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ 48#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
49#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
41#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ 50#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
42#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ 51#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
43#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ 52#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
44#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */ 53#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
45#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ 54#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
55#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
56#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
57#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
58#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
59#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
60#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
46#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ 61#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
47#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ 62#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
48#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ 63#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -93,6 +108,8 @@
93#define SPRN_PID2 0x27A /* Process ID Register 2 */ 108#define SPRN_PID2 0x27A /* Process ID Register 2 */
94#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */ 109#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
95#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */ 110#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
111#define SPRN_TLB2CFG 0x2B2 /* TLB 2 Config Register */
112#define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */
96#define SPRN_EPR 0x2BE /* External Proxy Register */ 113#define SPRN_EPR 0x2BE /* External Proxy Register */
97#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */ 114#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */
98#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */ 115#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
@@ -415,16 +432,31 @@
415#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */ 432#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */
416#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */ 433#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */
417 434
418/* Bit definitions for MMUCSR0 */
419#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
420#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
421#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
422#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
423
424/* Bit definitions for SGR. */ 435/* Bit definitions for SGR. */
425#define SGR_NORMAL 0 /* Speculative fetching allowed. */ 436#define SGR_NORMAL 0 /* Speculative fetching allowed. */
426#define SGR_GUARDED 1 /* Speculative fetching disallowed. */ 437#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
427 438
439/* Bit definitions for EPCR */
440#define SPRN_EPCR_EXTGS 0x80000000 /* External Input interrupt
441 * directed to Guest state */
442#define SPRN_EPCR_DTLBGS 0x40000000 /* Data TLB Error interrupt
443 * directed to guest state */
444#define SPRN_EPCR_ITLBGS 0x20000000 /* Instr. TLB error interrupt
445 * directed to guest state */
446#define SPRN_EPCR_DSIGS 0x10000000 /* Data Storage interrupt
447 * directed to guest state */
448#define SPRN_EPCR_ISIGS 0x08000000 /* Instr. Storage interrupt
449 * directed to guest state */
450#define SPRN_EPCR_DUVD 0x04000000 /* Disable Hypervisor Debug */
451#define SPRN_EPCR_ICM 0x02000000 /* Interrupt computation mode
452 * (copied to MSR:CM on intr) */
453#define SPRN_EPCR_GICM 0x01000000 /* Guest Interrupt Comp. mode */
454#define SPRN_EPCR_DGTMI 0x00800000 /* Disable TLB Guest Management
455 * instructions */
456#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
457 * for hypervisor */
458
459
428/* 460/*
429 * The IBM-403 is an even odder special case, as it is much 461 * The IBM-403 is an even odder special case, as it is much
430 * older than the IBM-405 series. We put these down here in case someone 462 * older than the IBM-405 series. We put these down here in case someone
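
As a sketch of how the new EPCR bits combine, the following standalone program (constants copied from the hunk above) builds a plausible hypervisor setting that directs external input and TLB error interrupts to guest state:

#include <stdio.h>

/* EPCR bit values copied from reg_booke.h above. */
#define SPRN_EPCR_EXTGS  0x80000000u
#define SPRN_EPCR_DTLBGS 0x40000000u
#define SPRN_EPCR_ITLBGS 0x20000000u
#define SPRN_EPCR_ICM    0x02000000u

int main(void)
{
	/* External input plus data/instruction TLB errors go straight to
	 * the guest; interrupts run in 64-bit computation mode. */
	unsigned int epcr = SPRN_EPCR_EXTGS | SPRN_EPCR_DTLBGS |
			    SPRN_EPCR_ITLBGS | SPRN_EPCR_ICM;

	printf("EPCR = %#010x\n", epcr);	/* 0xe2000000 */
	return 0;
}
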
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 817fac0a0714..dae19342f0b9 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_POWERPC_SETUP_H 1#ifndef _ASM_POWERPC_SETUP_H
2#define _ASM_POWERPC_SETUP_H 2#define _ASM_POWERPC_SETUP_H
3 3
4#define COMMAND_LINE_SIZE 512 4#include <asm-generic/setup.h>
5 5
6#endif /* _ASM_POWERPC_SETUP_H */ 6#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c25f73d1d842..c0d3b8af9319 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -148,6 +148,16 @@ extern struct smp_ops_t *smp_ops;
148extern void arch_send_call_function_single_ipi(int cpu); 148extern void arch_send_call_function_single_ipi(int cpu);
149extern void arch_send_call_function_ipi(cpumask_t mask); 149extern void arch_send_call_function_ipi(cpumask_t mask);
150 150
151/* Definitions relating to the secondary CPU spin loop
152 * and entry point. Not all of them exist on both 32-bit and
153 * 64-bit, but defining them all here does no harm.
154 */
155extern void generic_secondary_smp_init(void);
156extern void generic_secondary_thread_init(void);
157extern unsigned long __secondary_hold_spinloop;
158extern unsigned long __secondary_hold_acknowledge;
159extern char __secondary_hold;
160
151#endif /* __ASSEMBLY__ */ 161#endif /* __ASSEMBLY__ */
152 162
153#endif /* __KERNEL__ */ 163#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 1e5cfad0e3f7..3ab8b3e6feb0 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -64,4 +64,7 @@
64#define SO_TIMESTAMPING 37 64#define SO_TIMESTAMPING 37
65#define SCM_TIMESTAMPING SO_TIMESTAMPING 65#define SCM_TIMESTAMPING SO_TIMESTAMPING
66 66
67#define SO_PROTOCOL 38
68#define SO_DOMAIN 39
69
67#endif /* _ASM_POWERPC_SOCKET_H */ 70#endif /* _ASM_POWERPC_SOCKET_H */
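
SO_PROTOCOL and SO_DOMAIN are read-only options that report how a socket was created. Assuming a libc that already exposes the new constants, a minimal usage example:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int proto = 0, domain = 0;
	socklen_t len = sizeof(int);

	if (fd < 0)
		return 1;
	getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len);
	len = sizeof(int);
	getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len);

	/* Expect protocol 6 (IPPROTO_TCP) and domain 2 (AF_INET). */
	printf("protocol=%d domain=%d\n", proto, domain);
	return 0;
}
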
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
54 * This returns the old value in the lock, so we succeeded 54 * This returns the old value in the lock, so we succeeded
55 * in getting the lock if the return value is 0. 55 * in getting the lock if the return value is 0.
56 */ 56 */
57static inline unsigned long __spin_trylock(raw_spinlock_t *lock) 57static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
58{ 58{
59 unsigned long tmp, token; 59 unsigned long tmp, token;
60 60
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
76static inline int __raw_spin_trylock(raw_spinlock_t *lock) 76static inline int __raw_spin_trylock(raw_spinlock_t *lock)
77{ 77{
78 CLEAR_IO_SYNC; 78 CLEAR_IO_SYNC;
79 return __spin_trylock(lock) == 0; 79 return arch_spin_trylock(lock) == 0;
80} 80}
81 81
82/* 82/*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
108{ 108{
109 CLEAR_IO_SYNC; 109 CLEAR_IO_SYNC;
110 while (1) { 110 while (1) {
111 if (likely(__spin_trylock(lock) == 0)) 111 if (likely(arch_spin_trylock(lock) == 0))
112 break; 112 break;
113 do { 113 do {
114 HMT_low(); 114 HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
126 126
127 CLEAR_IO_SYNC; 127 CLEAR_IO_SYNC;
128 while (1) { 128 while (1) {
129 if (likely(__spin_trylock(lock) == 0)) 129 if (likely(arch_spin_trylock(lock) == 0))
130 break; 130 break;
131 local_save_flags(flags_dis); 131 local_save_flags(flags_dis);
132 local_irq_restore(flags); 132 local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
181 * This returns the old value in the lock + 1, 181 * This returns the old value in the lock + 1,
182 * so we got a read lock if the return value is > 0. 182 * so we got a read lock if the return value is > 0.
183 */ 183 */
184static inline long __read_trylock(raw_rwlock_t *rw) 184static inline long arch_read_trylock(raw_rwlock_t *rw)
185{ 185{
186 long tmp; 186 long tmp;
187 187
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
205 * This returns the old value in the lock, 205 * This returns the old value in the lock,
206 * so we got the write lock if the return value is 0. 206 * so we got the write lock if the return value is 0.
207 */ 207 */
208static inline long __write_trylock(raw_rwlock_t *rw) 208static inline long arch_write_trylock(raw_rwlock_t *rw)
209{ 209{
210 long tmp, token; 210 long tmp, token;
211 211
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
228static inline void __raw_read_lock(raw_rwlock_t *rw) 228static inline void __raw_read_lock(raw_rwlock_t *rw)
229{ 229{
230 while (1) { 230 while (1) {
231 if (likely(__read_trylock(rw) > 0)) 231 if (likely(arch_read_trylock(rw) > 0))
232 break; 232 break;
233 do { 233 do {
234 HMT_low(); 234 HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
242static inline void __raw_write_lock(raw_rwlock_t *rw) 242static inline void __raw_write_lock(raw_rwlock_t *rw)
243{ 243{
244 while (1) { 244 while (1) {
245 if (likely(__write_trylock(rw) == 0)) 245 if (likely(arch_write_trylock(rw) == 0))
246 break; 246 break;
247 do { 247 do {
248 HMT_low(); 248 HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
255 255
256static inline int __raw_read_trylock(raw_rwlock_t *rw) 256static inline int __raw_read_trylock(raw_rwlock_t *rw)
257{ 257{
258 return __read_trylock(rw) > 0; 258 return arch_read_trylock(rw) > 0;
259} 259}
260 260
261static inline int __raw_write_trylock(raw_rwlock_t *rw) 261static inline int __raw_write_trylock(raw_rwlock_t *rw)
262{ 262{
263 return __write_trylock(rw) == 0; 263 return arch_write_trylock(rw) == 0;
264} 264}
265 265
266static inline void __raw_read_unlock(raw_rwlock_t *rw) 266static inline void __raw_read_unlock(raw_rwlock_t *rw)
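
The arch_*_trylock primitives keep the "returns the old lock value, 0 means success" convention the comments describe. A rough userspace analogue of that convention using C11 atomics (not the kernel's lwarx/stwcx. implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong lock_word;	/* 0 = free, non-zero = owner token */

/* Returns the old lock value, so 0 means we took the lock --
 * the same convention as arch_spin_trylock() above. */
static unsigned long my_trylock(unsigned long token)
{
	unsigned long expected = 0;

	if (atomic_compare_exchange_strong(&lock_word, &expected, token))
		return 0;
	return expected;	/* current owner's token */
}

static void my_lock(unsigned long token)
{
	while (my_trylock(token) != 0) {
		/* The kernel loop lowers SMT priority (HMT_low) and, under
		 * a hypervisor, may yield to the lock holder here. */
	}
}

int main(void)
{
	my_lock(42);
	printf("held by %lu\n", atomic_load(&lock_word));
	return 0;
}
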
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 30891d6e2bc1..8979d4cd3d70 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -13,15 +13,13 @@
13 13
14#include <linux/swiotlb.h> 14#include <linux/swiotlb.h>
15 15
16extern struct dma_mapping_ops swiotlb_dma_ops; 16extern struct dma_map_ops swiotlb_dma_ops;
17extern struct dma_mapping_ops swiotlb_pci_dma_ops;
18
19int swiotlb_arch_address_needs_mapping(struct device *, dma_addr_t,
20 size_t size);
21 17
22static inline void dma_mark_clean(void *addr, size_t size) {} 18static inline void dma_mark_clean(void *addr, size_t size) {}
23 19
24extern unsigned int ppc_swiotlb_enable; 20extern unsigned int ppc_swiotlb_enable;
25int __init swiotlb_setup_bus_notifier(void); 21int __init swiotlb_setup_bus_notifier(void);
26 22
23extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
24
27#endif /* __ASM_SWIOTLB_H */ 25#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 370600ca2765..c7d671a7d9a1 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -95,8 +95,8 @@ SYSCALL(reboot)
95SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir) 95SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
96SYSCALL_SPU(mmap) 96SYSCALL_SPU(mmap)
97SYSCALL_SPU(munmap) 97SYSCALL_SPU(munmap)
98SYSCALL_SPU(truncate) 98COMPAT_SYS_SPU(truncate)
99SYSCALL_SPU(ftruncate) 99COMPAT_SYS_SPU(ftruncate)
100SYSCALL_SPU(fchmod) 100SYSCALL_SPU(fchmod)
101SYSCALL_SPU(fchown) 101SYSCALL_SPU(fchown)
102COMPAT_SYS_SPU(getpriority) 102COMPAT_SYS_SPU(getpriority)
@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
322SYSCALL_SPU(dup3) 322SYSCALL_SPU(dup3)
323SYSCALL_SPU(pipe2) 323SYSCALL_SPU(pipe2)
324SYSCALL(inotify_init1) 324SYSCALL(inotify_init1)
325SYSCALL_SPU(perf_counter_open) 325SYSCALL_SPU(perf_event_open)
326COMPAT_SYS_SPU(preadv) 326COMPAT_SYS_SPU(preadv)
327COMPAT_SYS_SPU(pwritev) 327COMPAT_SYS_SPU(pwritev)
328COMPAT_SYS(rt_tgsigqueueinfo) 328COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e20ff7541f36..e2b428b0f7ba 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -25,57 +25,25 @@
25 25
26#include <linux/pagemap.h> 26#include <linux/pagemap.h>
27 27
28struct mmu_gather;
29
30#define tlb_start_vma(tlb, vma) do { } while (0) 28#define tlb_start_vma(tlb, vma) do { } while (0)
31#define tlb_end_vma(tlb, vma) do { } while (0) 29#define tlb_end_vma(tlb, vma) do { } while (0)
32 30
33#if !defined(CONFIG_PPC_STD_MMU)
34
35#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
36
37#elif defined(__powerpc64__)
38
39extern void pte_free_finish(void);
40
41static inline void tlb_flush(struct mmu_gather *tlb)
42{
43 struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
44
45 /* If there's a TLB batch pending, then we must flush it because the
46 * pages are going to be freed and we really don't want to have a CPU
47 * access a freed page because it has a stale TLB
48 */
49 if (tlbbatch->index)
50 __flush_tlb_pending(tlbbatch);
51
52 pte_free_finish();
53}
54
55#else
56
57extern void tlb_flush(struct mmu_gather *tlb); 31extern void tlb_flush(struct mmu_gather *tlb);
58 32
59#endif
60
61/* Get the generic bits... */ 33/* Get the generic bits... */
62#include <asm-generic/tlb.h> 34#include <asm-generic/tlb.h>
63 35
64#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
65
66#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
67
68#else
69extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, 36extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
70 unsigned long address); 37 unsigned long address);
71 38
72static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, 39static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
73 unsigned long address) 40 unsigned long address)
74{ 41{
42#ifdef CONFIG_PPC_STD_MMU_32
75 if (pte_val(*ptep) & _PAGE_HASHPTE) 43 if (pte_val(*ptep) & _PAGE_HASHPTE)
76 flush_hash_entry(tlb->mm, ptep, address); 44 flush_hash_entry(tlb->mm, ptep, address);
45#endif
77} 46}
78 47
79#endif
80#endif /* __KERNEL__ */ 48#endif /* __KERNEL__ */
81#endif /* __ASM_POWERPC_TLB_H */ 49#endif /* __ASM_POWERPC_TLB_H */
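
The removed 64-bit branch shows the deferred-flush idea: invalidations accumulate in a per-CPU batch that must be flushed before the underlying pages are freed. A generic, self-contained sketch of that pattern with hypothetical names:

#include <stdio.h>

#define BATCH_MAX 16

struct tlb_batch {
	unsigned long vaddrs[BATCH_MAX];
	int index;
};

static void flush_pending(struct tlb_batch *b)
{
	/* Stand-in for __flush_tlb_pending(): invalidate everything
	 * accumulated so far in one operation. */
	printf("flushing %d entries\n", b->index);
	b->index = 0;
}

static void batch_invalidate(struct tlb_batch *b, unsigned long va)
{
	b->vaddrs[b->index++] = va;
	if (b->index == BATCH_MAX)
		flush_pending(b);
}

int main(void)
{
	struct tlb_batch b = { .index = 0 };

	for (unsigned long va = 0; va < 20 * 4096; va += 4096)
		batch_invalidate(&b, va);

	/* Before the pages are freed, any remainder must be flushed so no
	 * CPU can reach a freed page through a stale TLB entry. */
	if (b.index)
		flush_pending(&b);
	return 0;
}
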
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index abbe3419d1dd..d50a380b2b6f 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
8 * - flush_tlb_page(vma, vmaddr) flushes one page 8 * - flush_tlb_page(vma, vmaddr) flushes one page
 9 * - local_flush_tlb_mm(mm) flushes the specified mm context on 9 * - local_flush_tlb_mm(mm) flushes the specified mm context on
10 * the local processor 10 * the local processor
11 * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor 11 * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
12 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB 12 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
@@ -29,7 +29,8 @@
29 * specific tlbie's 29 * specific tlbie's
30 */ 30 */
31 31
32#include <linux/mm.h> 32struct vm_area_struct;
33struct mm_struct;
33 34
34#define MMU_NO_CONTEXT ((unsigned int)-1) 35#define MMU_NO_CONTEXT ((unsigned int)-1)
35 36
@@ -40,12 +41,18 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
40extern void local_flush_tlb_mm(struct mm_struct *mm); 41extern void local_flush_tlb_mm(struct mm_struct *mm);
41extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 42extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
42 43
44extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
45 int tsize, int ind);
46
43#ifdef CONFIG_SMP 47#ifdef CONFIG_SMP
44extern void flush_tlb_mm(struct mm_struct *mm); 48extern void flush_tlb_mm(struct mm_struct *mm);
45extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 49extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
50extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
51 int tsize, int ind);
46#else 52#else
47#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 53#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
48#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) 54#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
55#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
49#endif 56#endif
50#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) 57#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
51 58
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 054a16d68082..394edcbcce71 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -57,14 +57,13 @@ static inline int pcibus_to_node(struct pci_bus *bus)
57 .cache_nice_tries = 1, \ 57 .cache_nice_tries = 1, \
58 .busy_idx = 3, \ 58 .busy_idx = 3, \
59 .idle_idx = 1, \ 59 .idle_idx = 1, \
60 .newidle_idx = 2, \ 60 .newidle_idx = 0, \
61 .wake_idx = 1, \ 61 .wake_idx = 0, \
62 .flags = SD_LOAD_BALANCE \ 62 .flags = SD_LOAD_BALANCE \
63 | SD_BALANCE_EXEC \ 63 | SD_BALANCE_EXEC \
64 | SD_BALANCE_FORK \
64 | SD_BALANCE_NEWIDLE \ 65 | SD_BALANCE_NEWIDLE \
65 | SD_WAKE_IDLE \ 66 | SD_SERIALIZE, \
66 | SD_SERIALIZE \
67 | SD_WAKE_BALANCE, \
68 .last_balance = jiffies, \ 67 .last_balance = jiffies, \
69 .balance_interval = 1, \ 68 .balance_interval = 1, \
70 .nr_balance_failed = 0, \ 69 .nr_balance_failed = 0, \
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cef080bfc607..f6ca76176766 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,7 +341,7 @@
341#define __NR_dup3 316 341#define __NR_dup3 316
342#define __NR_pipe2 317 342#define __NR_pipe2 317
343#define __NR_inotify_init1 318 343#define __NR_inotify_init1 318
344#define __NR_perf_counter_open 319 344#define __NR_perf_event_open 319
345#define __NR_preadv 320 345#define __NR_preadv 320
346#define __NR_pwritev 321 346#define __NR_pwritev 321
347#define __NR_rt_tgsigqueueinfo 322 347#define __NR_rt_tgsigqueueinfo 322
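
The renamed syscall has no libc wrapper, so callers reach it through syscall(2). A minimal, hedged example (header and constant names as in kernels that carry this rename):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	/* Count instructions in the current task on any CPU.  On powerpc
	 * this resolves to syscall number 319, as defined above. */
	long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0UL);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
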
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 26fc449bd989..dc0419b66f17 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -7,9 +7,8 @@
7#define VDSO32_LBASE 0x100000 7#define VDSO32_LBASE 0x100000
8#define VDSO64_LBASE 0x100000 8#define VDSO64_LBASE 0x100000
9 9
10/* Default map addresses */ 10/* Default map addresses for the 32-bit vDSO */
11#define VDSO32_MBASE VDSO32_LBASE 11#define VDSO32_MBASE VDSO32_LBASE
12#define VDSO64_MBASE VDSO64_LBASE
13 12
14#define VDSO_VERSION_STRING LINUX_2.6.15 13#define VDSO_VERSION_STRING LINUX_2.6.15
15 14