author		Mike Travis <travis@sgi.com>	2008-12-31 20:34:16 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-03 12:53:31 -0500
commit		7eb19553369c46cc1fa64caf120cbcab1b597f7c (patch)
tree		ef1a3beae706b9497c845d0a2557ceb4d2754998 /arch/powerpc/include
parent		6092848a2a23b660150a38bc06f59d75838d70c8 (diff)
parent		8c384cdee3e04d6194a2c2b192b624754f990835 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask into merge-rr-cpumask
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/rcuclassic.c
	kernel/sched.c
	kernel/time/tick-sched.c

Signed-off-by: Mike Travis <travis@sgi.com>
[ mingo@elte.hu: backmerged typo fix for io_apic.c ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/atomic.h		|  18
-rw-r--r--	arch/powerpc/include/asm/bug.h			|  11
-rw-r--r--	arch/powerpc/include/asm/byteorder.h		|  38
-rw-r--r--	arch/powerpc/include/asm/cputable.h		| 113
-rw-r--r--	arch/powerpc/include/asm/dcr-native.h		|  63
-rw-r--r--	arch/powerpc/include/asm/dcr.h			|   4
-rw-r--r--	arch/powerpc/include/asm/device.h		|  12
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h		| 156
-rw-r--r--	arch/powerpc/include/asm/eeh.h			|   8
-rw-r--r--	arch/powerpc/include/asm/elf.h			|   2
-rw-r--r--	arch/powerpc/include/asm/feature-fixups.h	|  30
-rw-r--r--	arch/powerpc/include/asm/highmem.h		|  23
-rw-r--r--	arch/powerpc/include/asm/io.h			|   7
-rw-r--r--	arch/powerpc/include/asm/kdump.h		|  13
-rw-r--r--	arch/powerpc/include/asm/kexec.h		|  15
-rw-r--r--	arch/powerpc/include/asm/local.h		|   4
-rw-r--r--	arch/powerpc/include/asm/lppaca.h		|   3
-rw-r--r--	arch/powerpc/include/asm/mmu-40x.h		|   5
-rw-r--r--	arch/powerpc/include/asm/mmu-44x.h		|  22
-rw-r--r--	arch/powerpc/include/asm/mmu-8xx.h		|   3
-rw-r--r--	arch/powerpc/include/asm/mmu-fsl-booke.h	|   7
-rw-r--r--	arch/powerpc/include/asm/mmu.h			|  57
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h		| 257
-rw-r--r--	arch/powerpc/include/asm/mpc52xx.h		|  19
-rw-r--r--	arch/powerpc/include/asm/mpc52xx_psc.h		|  11
-rw-r--r--	arch/powerpc/include/asm/mutex.h		| 135
-rw-r--r--	arch/powerpc/include/asm/page.h			|  13
-rw-r--r--	arch/powerpc/include/asm/page_32.h		|   7
-rw-r--r--	arch/powerpc/include/asm/pci-bridge.h		|  30
-rw-r--r--	arch/powerpc/include/asm/pci.h			|  15
-rw-r--r--	arch/powerpc/include/asm/pgalloc-32.h		|  11
-rw-r--r--	arch/powerpc/include/asm/pgalloc-64.h		|  34
-rw-r--r--	arch/powerpc/include/asm/pgalloc.h		|  41
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc32.h	|  42
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	|  15
-rw-r--r--	arch/powerpc/include/asm/pgtable.h		|  26
-rw-r--r--	arch/powerpc/include/asm/ppc_asm.h		|   4
-rw-r--r--	arch/powerpc/include/asm/processor.h		|   8
-rw-r--r--	arch/powerpc/include/asm/prom.h			|   3
-rw-r--r--	arch/powerpc/include/asm/ps3.h			|  56
-rw-r--r--	arch/powerpc/include/asm/ps3av.h		|   4
-rw-r--r--	arch/powerpc/include/asm/reg.h			|   4
-rw-r--r--	arch/powerpc/include/asm/rtas.h			|   1
-rw-r--r--	arch/powerpc/include/asm/sfp-machine.h		|  58
-rw-r--r--	arch/powerpc/include/asm/smp.h			|   7
-rw-r--r--	arch/powerpc/include/asm/spinlock.h		|   2
-rw-r--r--	arch/powerpc/include/asm/synch.h		|   4
-rw-r--r--	arch/powerpc/include/asm/system.h		|  24
-rw-r--r--	arch/powerpc/include/asm/time.h			|  20
-rw-r--r--	arch/powerpc/include/asm/tlbflush.h		|  87
-rw-r--r--	arch/powerpc/include/asm/topology.h		|  12
-rw-r--r--	arch/powerpc/include/asm/vdso_datapage.h	|   3
52 files changed, 917 insertions(+), 650 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index f3fc733758f5..499be5bdd6fa 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ int atomic_inc_return(atomic_t *v)
@@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ int atomic_dec_return(atomic_t *v)
@@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -346,7 +346,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ long atomic64_inc_return(atomic64_t *v)
@@ -362,7 +362,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -388,7 +388,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ long atomic64_dec_return(atomic64_t *v)
@@ -404,7 +404,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -431,7 +431,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
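Aside (annotation, not part of the commit): the new "xer" clobber is needed because these reservation loops update the counter with carry-writing arithmetic such as addic, which sets XER[CA] behind the compiler's back. A minimal sketch of the function the first hunk patches, assuming the usual lwarx/stwcx. loop (the in-tree version also carries a 405-erratum workaround between the add and the store):

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");		/* addic writes the XER carry bit */
}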
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index e55d1f66b86f..64e1fdca233e 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -3,6 +3,7 @@
 #ifdef __KERNEL__
 
 #include <asm/asm-compat.h>
+
 /*
  * Define an illegal instr to trap on the bug.
  * We don't use 0 because that marks the end of a function
@@ -14,6 +15,7 @@
 #ifdef CONFIG_BUG
 
 #ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 .macro EMIT_BUG_ENTRY addr,file,line,flags
 	.section __bug_table,"a"
@@ -26,7 +28,7 @@
 	.previous
 .endm
 #else
-	.macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
 	.section __bug_table,"a"
 5001:	PPC_LONG \addr
 	.short \flags
@@ -113,6 +115,13 @@
 #define HAVE_ARCH_BUG_ON
 #define HAVE_ARCH_WARN_ON
 #endif /* __ASSEMBLY __ */
+#else
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#endif
 #endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index b37752214a16..d5de325472e9 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -11,6 +11,8 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
+#define __BIG_ENDIAN
+
 #ifdef __GNUC__
 #ifdef __KERNEL__
 
@@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
 	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
 	return val;
 }
+#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
 	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
+static inline void __arch_swab16s(__u16 *addr)
+{
+	st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
 	__u32 val;
@@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
 	return val;
 }
+#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
 	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+static inline void __arch_swab32s(__u32 *addr)
+{
+	st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
 {
 	__u16 result;
 
@@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
 		: "r" (value), "0" (value >> 8));
 	return result;
 }
+#define __arch_swab16 __arch_swab16
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
 {
 	__u32 result;
 
@@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
 		: "r" (value), "0" (value >> 24));
 	return result;
 }
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
+#define __arch_swab32 __arch_swab32
 
 #endif /* __KERNEL__ */
 
-#ifndef __STRICT_ANSI__
-#define __BYTEORDER_HAS_U64__
 #ifndef __powerpc64__
 #define __SWAB_64_THRU_32__
 #endif /* __powerpc64__ */
-#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
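Aside (annotation, not part of the commit): the renamed __arch_swab* hooks are the override points the consolidated <linux/byteorder.h> infrastructure looks for, so the generic swab()/cpu_to_le*() family now lands on the lhbrx/sthbrx forms above rather than generic shift-and-mask code. A tiny hypothetical use of the in-place variant defined in this hunk:

/* Byte-reverse a 16-bit field in place; __arch_swab16s() resolves to
 * st_le16(addr, *addr), i.e. a single byte-reversed halfword store. */
static inline void swab16_inplace_example(__u16 *p)
{
	__arch_swab16s(p);
}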
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1e94b07a020e..4911104791c3 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -82,6 +82,7 @@ struct cpu_spec {
 	char		*cpu_name;
 	unsigned long	cpu_features;		/* Kernel features */
 	unsigned int	cpu_user_features;	/* Userland features */
+	unsigned int	mmu_features;		/* MMU features */
 
 	/* cache line sizes */
 	unsigned int	icache_bsize;
@@ -144,17 +145,14 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_USE_TB			ASM_CONST(0x0000000000000040)
 #define CPU_FTR_L2CSR			ASM_CONST(0x0000000000000080)
 #define CPU_FTR_601			ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE		ASM_CONST(0x0000000000000200)
 #define CPU_FTR_CAN_NAP			ASM_CONST(0x0000000000000400)
 #define CPU_FTR_L3CR			ASM_CONST(0x0000000000000800)
 #define CPU_FTR_L3_DISABLE_NAP		ASM_CONST(0x0000000000001000)
 #define CPU_FTR_NAP_DISABLE_L2_PR	ASM_CONST(0x0000000000002000)
 #define CPU_FTR_DUAL_PLL_750FX		ASM_CONST(0x0000000000004000)
 #define CPU_FTR_NO_DPM			ASM_CONST(0x0000000000008000)
-#define CPU_FTR_HAS_HIGH_BATS		ASM_CONST(0x0000000000010000)
 #define CPU_FTR_NEED_COHERENT		ASM_CONST(0x0000000000020000)
 #define CPU_FTR_NO_BTIC			ASM_CONST(0x0000000000040000)
-#define CPU_FTR_BIG_PHYS		ASM_CONST(0x0000000000080000)
 #define CPU_FTR_NODSISRALIGN		ASM_CONST(0x0000000000100000)
 #define CPU_FTR_PPC_LE			ASM_CONST(0x0000000000200000)
 #define CPU_FTR_REAL_LE			ASM_CONST(0x0000000000400000)
@@ -163,6 +161,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_SPE			ASM_CONST(0x0000000002000000)
 #define CPU_FTR_NEED_PAIRED_STWCX	ASM_CONST(0x0000000004000000)
 #define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)
+#define CPU_FTR_NOEXECUTE		ASM_CONST(0x0000000010000000)
+#define CPU_FTR_INDEXED_DCR		ASM_CONST(0x0000000020000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -177,7 +177,6 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_SLB			LONG_ASM_CONST(0x0000000100000000)
 #define CPU_FTR_16M_PAGE		LONG_ASM_CONST(0x0000000200000000)
 #define CPU_FTR_TLBIEL			LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE		LONG_ASM_CONST(0x0000000800000000)
 #define CPU_FTR_IABR			LONG_ASM_CONST(0x0000002000000000)
 #define CPU_FTR_MMCRA			LONG_ASM_CONST(0x0000004000000000)
 #define CPU_FTR_CTRL			LONG_ASM_CONST(0x0000008000000000)
@@ -194,6 +193,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_VSX			LONG_ASM_CONST(0x0010000000000000)
 #define CPU_FTR_SAO			LONG_ASM_CONST(0x0020000000000000)
 #define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -264,164 +264,159 @@ extern const char *powerpc_base_platform;
 	    !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
 	    !defined(CONFIG_BOOKE))
 
-#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | \
 	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
 #define CPU_FTRS_603	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_604	(CPU_FTR_COMMON | \
-	CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE)
+	CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
 #define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_740	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
 	CPU_FTR_PPC_LE)
 #define CPU_FTRS_750	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
 	CPU_FTR_PPC_LE)
-#define CPU_FTRS_750CL	(CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750CL	(CPU_FTRS_750)
 #define CPU_FTRS_750FX1	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
 #define CPU_FTRS_750FX2	(CPU_FTRS_750 | CPU_FTR_NO_DPM)
-#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
-	CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
 #define CPU_FTRS_750GX	(CPU_FTRS_750FX)
 #define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+	CPU_FTR_ALTIVEC_COMP | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7400	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+	CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7450_20	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7450_21	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7450_23	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_1	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
-	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+	CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_20	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
-	CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
+	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447_10	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
 	CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447A	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7448	(CPU_FTR_COMMON | \
 	CPU_FTR_USE_TB | \
 	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_82XX	(CPU_FTR_COMMON | \
 	CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
 #define CPU_FTRS_G2_LE	(CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
-	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
+	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
 #define CPU_FTRS_E300	(CPU_FTR_MAYBE_CAN_DOZE | \
-	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
 	CPU_FTR_COMMON)
 #define CPU_FTRS_E300C2	(CPU_FTR_MAYBE_CAN_DOZE | \
-	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+	CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
 	CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
-#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | \
-	CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
+#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | CPU_FTR_USE_TB)
 #define CPU_FTRS_8XX	(CPU_FTR_USE_TB)
-#define CPU_FTRS_40X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_40X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
+	CPU_FTR_INDEXED_DCR)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
 	CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
-	CPU_FTR_UNIFIED_ID_CACHE)
+	CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
+	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+	CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500_2	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \
-	CPU_FTR_NODSISRALIGN)
+	CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+	CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
-	CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
+	CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+	CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
 #define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
+	CPU_FTR_IABR | CPU_FTR_PPC_LE)
 #define CPU_FTRS_RS64	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
+	CPU_FTR_IABR | \
 	CPU_FTR_MMCRA | CPU_FTR_CTRL)
 #define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
 	CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	CPU_FTR_PURR)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	CPU_FTR_DSCR)
+	CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	CPU_FTR_DSCR | CPU_FTR_SAO)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
-	CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ)
+	CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+	CPU_FTR_UNALIGNED_LD_STD)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	CPU_FTR_PPCAS_ARCH_V2 | \
 	CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
 	CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
-#define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | \
-	CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
 #define CPU_FTRS_POSSIBLE	\
@@ -452,7 +447,7 @@ enum {
 	    CPU_FTRS_40X |
 #endif
 #ifdef CONFIG_44x
-	    CPU_FTRS_44X |
+	    CPU_FTRS_44X | CPU_FTRS_440x6 |
 #endif
#ifdef CONFIG_E200
 	    CPU_FTRS_E200 |
@@ -492,7 +487,7 @@ enum {
 	    CPU_FTRS_40X &
 #endif
 #ifdef CONFIG_44x
-	    CPU_FTRS_44X &
+	    CPU_FTRS_44X & CPU_FTRS_440x6 &
 #endif
 #ifdef CONFIG_E200
 	    CPU_FTRS_E200 &
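Aside (annotation, not part of the commit): a feature bit defined above is tested at runtime with cpu_has_feature(), declared further down in this same header. Because that test is masked against the compile-time CPU_FTRS_POSSIBLE/CPU_FTRS_ALWAYS sets, a check like this illustrative one folds to a constant on kernels built for a single CPU family:

static inline int have_indexed_dcr(void)	/* example name, not in-tree */
{
	return cpu_has_feature(CPU_FTR_INDEXED_DCR);
}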
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 72d2b72c7390..7d2e6235726d 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -23,6 +23,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/spinlock.h>
+#include <asm/cputable.h>
 
 typedef struct {
 	unsigned int base;
@@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host)
 #define dcr_read_native(host, dcr_n)		mfdcr(dcr_n + host.base)
 #define dcr_write_native(host, dcr_n, value)	mtdcr(dcr_n + host.base, value)
 
-/* Device Control Registers */
-void __mtdcr(int reg, unsigned int val);
-unsigned int __mfdcr(int reg);
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction based accessors. We hand code
+ * the opcodes in order not to depend on newer binutils
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+	unsigned int ret;
+	asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+		     : "=r" (ret) : "r" (reg));
+	return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+	asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+		     : : "r" (val), "r" (reg));
+}
+
 #define mfdcr(rn)						\
 	({unsigned int rval;					\
-	if (__builtin_constant_p(rn))				\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
 		asm volatile("mfdcr %0," __stringify(rn)	\
 			      : "=r" (rval));			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		rval = mfdcrx(rn);				\
 	else							\
 		rval = __mfdcr(rn);				\
 	rval;})
 
 #define mtdcr(rn, v)						\
 do {								\
-	if (__builtin_constant_p(rn))				\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
 		asm volatile("mtdcr " __stringify(rn) ",%0"	\
 			      : : "r" (v));			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		mtdcrx(rn, v);					\
 	else							\
 		__mtdcr(rn, v);					\
 } while (0)
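Aside (annotation, not part of the commit): the magic constants are the X-form encodings of mfdcrx and mtdcrx, with primary opcode 31 in the top six bits and extended opcodes 259 and 387 shifted left by one; the inline asm then ORs the GPR numbers gcc picked into the RT/RS field at bit 21 and the RA field at bit 16. An illustrative decomposition:

#define X_FORM(po, xo)	(((po) << 26) | ((xo) << 1))
/* X_FORM(31, 259) == 0x7c000206	mfdcrx RT,RA */
/* X_FORM(31, 387) == 0x7c000306	mtdcrx RA,RS */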
@@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
 	unsigned int val;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	val = __mfdcr(base_data);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = mfdcrx(base_data);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = __mfdcr(base_data);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 	return val;
 }
@@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg,
 	unsigned long flags;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	__mtdcr(base_data, val);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		__mtdcr(base_data, val);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 }
 
@@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg,
 	unsigned int val;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	val = (__mfdcr(base_data) & ~clr) | set;
-	__mtdcr(base_data, val);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = (mfdcrx(base_data) & ~clr) | set;
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = (__mfdcr(base_data) & ~clr) | set;
+		__mtdcr(base_data, val);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 }
 
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
index d13fb68bb5c0..9d6851cfb841 100644
--- a/arch/powerpc/include/asm/dcr.h
+++ b/arch/powerpc/include/asm/dcr.h
@@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t;
  * additional helpers to read the DCR * base from the device-tree
  */
 struct device_node;
-extern unsigned int dcr_resource_start(struct device_node *np,
+extern unsigned int dcr_resource_start(const struct device_node *np,
 				       unsigned int index);
-extern unsigned int dcr_resource_len(struct device_node *np,
+extern unsigned int dcr_resource_len(const struct device_node *np,
 				     unsigned int index);
 #endif /* CONFIG_PPC_DCR */
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index dfd504caccc1..7d2277cef09a 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -18,4 +18,16 @@ struct dev_archdata {
 	void		*dma_data;
 };
 
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+					 struct device_node *np)
+{
+	ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+	return ad->of_node;
+}
+
 #endif /* _ASM_POWERPC_DEVICE_H */
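Aside (annotation, not part of the commit): bus code is expected to go through the new accessors rather than poking archdata fields directly. A hypothetical caller sketch (names ours):

static void example_attach_of_node(struct device *dev,
				   struct device_node *np)
{
	dev_archdata_set_node(&dev->archdata, np);
}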
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index fddb229bd74f..86cef7ddc8d5 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -60,12 +60,6 @@ struct dma_mapping_ops {
 			dma_addr_t *dma_handle, gfp_t flag);
 	void		(*free_coherent)(struct device *dev, size_t size,
 			void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-			size_t size, enum dma_data_direction direction,
-			struct dma_attrs *attrs);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			size_t size, enum dma_data_direction direction,
-			struct dma_attrs *attrs);
 	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction direction,
 			struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
 			dma_addr_t dma_address, size_t size,
 			enum dma_data_direction direction,
 			struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+			dma_addr_t dma_handle, unsigned long offset,
+			size_t size,
+			enum dma_data_direction direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+			dma_addr_t dma_handle, unsigned long offset,
+			size_t size,
+			enum dma_data_direction direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+			struct scatterlist *sg, int nelems,
+			enum dma_data_direction direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+			struct scatterlist *sg, int nelems,
+			enum dma_data_direction direction);
+#endif
 };
 
 /*
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 
 /*
- * TODO: map_/unmap_single will ideally go away, to be completely
- * replaced by map/unmap_page. Until then, we allow dma_ops to have
- * one or the other, or both by checking to see if the specific
- * function requested exists; and if not, falling back on the other set.
+ * map_/unmap_single actually call through to map/unmap_page now that all the
+ * dma_mapping_ops have been converted over. We just have to get the page and
+ * offset to pass through to map_page
 */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_single)
-		return dma_ops->map_single(dev, cpu_addr, size, direction,
-					   attrs);
-
 	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
 				 (unsigned long)cpu_addr % PAGE_SIZE, size,
 				 direction, attrs);
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_single) {
-		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
-		return;
-	}
-
 	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_page)
-		return dma_ops->map_page(dev, page, offset, size, direction,
-					 attrs);
-
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-				   direction, attrs);
+	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_page) {
-		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-		return;
-	}
-
-	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
 }
 
 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+			size, direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle,
+			0, size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+			offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+			size, direction);
+}
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+}
 
-	BUG_ON(direction == DMA_NONE);
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+}
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
 }
+#endif
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
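Aside (annotation, not part of the commit): the driver-visible semantics of the sync calls do not change; only the dispatch moves into dma_mapping_ops, or compiles away entirely when CONFIG_PPC_NEED_DMA_SYNC_OPS is unset, as on cache-coherent platforms. A typical illustrative streaming-DMA receive path:

static void example_rx_complete(struct device *dev, dma_addr_t handle,
				size_t len)
{
	/* hand the buffer back to the CPU before reading it */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the received data here ... */

	/* return ownership to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}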
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b886bec67016..66ea9b8b95c5 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -17,8 +17,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#ifndef _PPC64_EEH_H
-#define _PPC64_EEH_H
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
 #ifdef __KERNEL__
 
 #include <linux/init.h>
@@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
 #define EEH_IO_ERROR_VALUE(size)	(-1UL)
 #endif /* CONFIG_EEH */
 
+#ifdef CONFIG_PPC64
 /*
  * MMIO read/write operations with EEH support.
 */
@@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
 	eeh_check_failure(addr, *(u32*)buf);
 }
 
+#endif /* CONFIG_PPC64 */
 #endif /* __KERNEL__ */
-#endif /* _PPC64_EEH_H */
+#endif /* _POWERPC_EEH_H */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index d812929390e4..cd46f023ec6d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-				       int executable_stack);
+				       int uses_interp);
 #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a1029967620b..e4094a5cb05b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -81,6 +81,36 @@ label##5:	\
 #define ALT_FTR_SECTION_END_IFCLR(msk)	\
 	ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
 
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION			START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label)		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val)		\
+	END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_IFSET(msk)	END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk)	END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE	MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
 /* Firmware feature dependent sections */
 #define BEGIN_FW_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
 #define BEGIN_FW_FTR_SECTION		START_FTR_SECTION(97)
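Aside (annotation, not part of the commit): the MMU variants are used exactly like the existing CPU and firmware feature sections; the instructions between the markers are patched at boot according to the new mmu_features word in cpu_spec. An illustrative assembly fragment, with MMU_FTR_EXAMPLE standing in for a real mask from <asm/mmu.h>:

BEGIN_MMU_FTR_SECTION
	b	1f		/* replaced with nops unless the mask bits are clear */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_EXAMPLE)
1: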
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 91c589520c0a..04e4a620952e 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
-#define LAST_PKMAP	(1 << PTE_SHIFT)
-#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
+/*
+ * We use one full pte table with 4K pages. And with 16K/64K pages pte
+ * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
+ * and PKMAP can be placed in single pte table. We use 1024 pages for
+ * PKMAP in case of 16K/64K pages.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER	PTE_SHIFT
+#else
+#define PKMAP_ORDER	10
+#endif
+#define LAST_PKMAP	(1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
 #define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
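Aside (annotation, not part of the commit): checking the comment's arithmetic, assuming the 8-byte PTEs used by the 36-bit-physical Book-E configurations: one PTE page holds PAGE_SIZE / sizeof(pte_t) entries, so a single pte table maps

	16K pages: (16384 / 8) * 16K = 2048 * 16K = 32MB
	64K pages: (65536 / 8) * 64K = 8192 * 64K = 512MB

which is where the "32MB and 512MB resp." figures come from; the 1024 PKMAP pages (PKMAP_ORDER 10) then fit alongside FIXMAP within that one table.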
@@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
85 BUG_ON(!pte_none(*(kmap_pte-idx))); 100 BUG_ON(!pte_none(*(kmap_pte-idx)));
86#endif 101#endif
87 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); 102 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
88 flush_tlb_page(NULL, vaddr); 103 local_flush_tlb_page(NULL, vaddr);
89 104
90 return (void*) vaddr; 105 return (void*) vaddr;
91} 106}
@@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	 * this pte without first remap it
 	 */
 	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	flush_tlb_page(NULL, vaddr);
+	local_flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
 }
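To make the pkmap sizing in the comment above concrete, here is a rough, stand-alone sketch (user-space C, illustrative only; it assumes 4-byte PTEs on 4K-page configurations, so PTE_SHIFT is 10, and uses the fixed PKMAP_ORDER of 10 for the larger page sizes):

/* Illustrative sketch of the PKMAP window sizing described above.
 * Assumes 4-byte PTEs with 4K pages (PTE_SHIFT == 10). */
#include <stdio.h>

static void pkmap_window(unsigned int page_shift, unsigned int pkmap_order)
{
	unsigned long last_pkmap = 1UL << pkmap_order;     /* pkmap slots */
	unsigned long window = last_pkmap << page_shift;   /* bytes of VA */

	printf("page shift %2u: %lu slots, %lu MB pkmap window\n",
	       page_shift, last_pkmap, window >> 20);
}

int main(void)
{
	pkmap_window(12, 10);	/* 4K pages:  1024 slots,  4 MB */
	pkmap_window(14, 10);	/* 16K pages: 1024 slots, 16 MB */
	pkmap_window(16, 10);	/* 64K pages: 1024 slots, 64 MB */
	return 0;
}

With 16K/64K pages a single pte table spans far more address space, which is why FIXMAP and PKMAP can share one table as the comment notes.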
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08266d2728b3..494cd8b0a278 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address)
  */
 #define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)

-/* We do NOT want virtual merging, it would put too much pressure on
- * our iommu allocator. Instead, we want drivers to be smart enough
- * to coalesce sglists that happen to have been mapped in a contiguous
- * way by the iommu
- */
-#define BIO_VMERGE_BOUNDARY	0
-
 /*
  * 32 bits still uses virt_to_bus() for it's implementation of DMA
  * mappings se we have to keep it defined here. We also have some old
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index b07ebb9784d3..5ebfe5d3c61f 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -1,6 +1,8 @@
 #ifndef _PPC64_KDUMP_H
 #define _PPC64_KDUMP_H

+#include <asm/page.h>
+
 /* Kdump kernel runs at 32 MB, change at your peril. */
 #define KDUMP_KERNELBASE	0x2000000

@@ -11,8 +13,19 @@

 #ifdef CONFIG_CRASH_DUMP

+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. Though on PPC32 translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add the PAGE_OFFSET and don't worry about it.
+ */
+#ifdef __powerpc64__
 #define KDUMP_TRAMPOLINE_START	0x0100
 #define KDUMP_TRAMPOLINE_END	0x3000
+#else
+#define KDUMP_TRAMPOLINE_START	(0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END	(0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */

 #define KDUMP_MIN_TCE_ENTRIES	2048

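A quick sketch of where the trampoline range lands on each side, assuming the common PPC32 PAGE_OFFSET of 0xc0000000 (that value is board-configurable, so treat it purely as an example):

/* Illustration only: effective kdump trampoline addresses.
 * The PPC32 PAGE_OFFSET value below is an assumed example. */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xc0000000UL;	/* assumed */

	/* PPC64: translation off, plain physical addresses */
	printf("ppc64 trampoline: 0x%04lx - 0x%04lx\n", 0x0100UL, 0x3000UL);

	/* PPC32: translation on, offset into the kernel linear mapping */
	printf("ppc32 trampoline: 0x%08lx - 0x%08lx\n",
	       0x0100UL + page_offset, 0x3000UL + page_offset);
	return 0;
}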
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 3736d9b33289..6dbffc981702 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -33,12 +33,12 @@

 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/reg.h>

 typedef void (*crash_shutdown_t)(void);

 #ifdef CONFIG_KEXEC

-#ifdef __powerpc64__
 /*
  * This function is responsible for capturing register states if coming
  * via panic or invoking dump using sysrq-trigger.
@@ -48,6 +48,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 {
 	if (oldregs)
 		memcpy(newregs, oldregs, sizeof(*newregs));
+#ifdef __powerpc64__
 	else {
 		/* FIXME Merge this with xmon_save_regs ?? */
 		unsigned long tmp1, tmp2;
@@ -100,15 +101,11 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		: "b" (newregs)
 		: "memory");
 	}
-}
 #else
-/*
- * Provide a dummy definition to avoid build failures. Will remain
- * empty till crash dump support is enabled.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
-					struct pt_regs *oldregs) { }
-#endif /* !__powerpc64 __ */
+	else
+		ppc_save_regs(newregs);
+#endif /* __powerpc64__ */
+}

 extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
 					   master to copy new code to 0 */
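A hedged sketch of how a crash path typically consumes this helper; the surrounding function is hypothetical and the real panic/sysrq plumbing is more involved:

/* Simplified, illustrative crash path using crash_setup_regs(). */
static void example_crash_path(struct pt_regs *oldregs)
{
	struct pt_regs newregs;

	/* With oldregs (e.g. an exception frame) the registers are
	 * copied; with NULL the current state is captured -- inline
	 * asm on ppc64, ppc_save_regs() on ppc32 after this patch. */
	crash_setup_regs(&newregs, oldregs);
	crash_kexec(&newregs);
}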
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 612d83276653..84b457a3c1bc 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l)
 	bne-	1b"
 	: "=&r" (t)
 	: "r" (&(l->a.counter))
-	: "cc", "memory");
+	: "cc", "xer", "memory");

 	return t;
 }
@@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l)
 	bne-	1b"
 	: "=&r" (t)
 	: "r" (&(l->a.counter))
-	: "cc", "memory");
+	: "cc", "xer", "memory");

 	return t;
 }
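The reason for the new clobber: addic reads and writes the carry bit, which lives in XER, so the compiler must be told that register is trashed. A minimal PowerPC-only sketch of the same pattern (illustrative, not the kernel's local_t code):

/* Minimal sketch of why "xer" belongs in the clobber list:
 * addic updates XER[CA]. Compiles only on PowerPC. */
static inline int inc_return_sketch(int *p)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	: "=&r" (t)
	: "r" (p)
	: "cc", "xer", "memory");	/* "xer" because addic touches the carry */

	return t;
}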
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 2fe268b10333..25aaa97facd8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -133,7 +133,8 @@ struct lppaca {
 //=============================================================================
 // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
 //=============================================================================
-	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
+	u32	page_ins;		// CMO Hint - # page ins by OS  x00-x04
+	u8	pmc_save_area[252];	// PMC interrupt Area           x04-xFF
 } __attribute__((__aligned__(0x400)));

 extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 3d108676584c..776f415a36aa 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -54,8 +54,9 @@
 #ifndef __ASSEMBLY__

 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;

 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index a825524c981a..8a97cfb08b7e 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -4,6 +4,8 @@
  * PPC440 support
  */

+#include <asm/page.h>
+
 #define PPC44x_MMUCR_TID	0x000000ff
 #define PPC44x_MMUCR_STS	0x00010000

@@ -56,8 +58,9 @@
 extern unsigned int tlb_44x_hwater;

 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;

 #endif /* !__ASSEMBLY__ */
@@ -73,4 +76,19 @@ typedef struct {
 /* Size of the TLBs used for pinning in lowmem */
 #define PPC_PIN_SIZE	(1 << 28)	/* 256M */

+#if (PAGE_SHIFT == 12)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#elif (PAGE_SHIFT == 14)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#elif (PAGE_SHIFT == 16)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT	(32 - PTE_T_LOG2 - PTE_SHIFT)
+
 #endif /* _ASM_POWERPC_MMU_44X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 9db877eb88db..07865a357848 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -137,7 +137,8 @@

 #ifndef __ASSEMBLY__
 typedef struct {
-	unsigned long id;
+	unsigned int id;
+	unsigned int active;
 	unsigned long vdso_base;
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h
index 925d93cf64d8..3f941c0f7e8e 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-fsl-booke.h
@@ -40,6 +40,8 @@
 #define MAS2_M			0x00000004
 #define MAS2_G			0x00000002
 #define MAS2_E			0x00000001
+#define MAS2_EPN_MASK(size)		(~0 << (2*(size) + 10))
+#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))

 #define MAS3_RPN		0xFFFFF000
 #define MAS3_U0			0x00000200
@@ -74,8 +76,9 @@
 #ifndef __ASSEMBLY__

 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */

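The EPN mask arithmetic above is easy to check by hand: a Book-E TSIZE of n selects a 4^n KB page, i.e. 2n + 10 offset bits. A stand-alone computation (the sketch uses an unsigned constant to sidestep signed-shift pitfalls; the header itself uses plain ~0):

/* Stand-alone illustration of the MAS2 EPN mask arithmetic. */
#include <stdio.h>

#define MAS2_EPN_MASK_SKETCH(size)	(~0U << (2*(size) + 10))

int main(void)
{
	printf("TSIZE 1 (4K):  mask 0x%08x\n", MAS2_EPN_MASK_SKETCH(1)); /* 0xfffff000 */
	printf("TSIZE 7 (16M): mask 0x%08x\n", MAS2_EPN_MASK_SKETCH(7)); /* 0xff000000 */
	return 0;
}

MAS2_VAL() then ORs the attribute flags into the masked-off low bits; since & binds tighter than |, the expression groups as ((addr & mask) | flags).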
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4c0e1b4f975c..6e7639911318 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,63 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__

+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * First half is MMU families
+ */
+#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
+
+/*
+ * This is individual features
+ */
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on 32-bit processor, only used
+ * by CONFIG_6xx currently as BookE supports that from day 1
+ */
+#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID		ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)
+
+#ifndef __ASSEMBLY__
+#include <asm/cputable.h>
+
+static inline int mmu_has_feature(unsigned long feature)
+{
+	return (cur_cpu_spec->mmu_features & feature);
+}
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#endif /* !__ASSEMBLY__ */
+
+
 #ifdef CONFIG_PPC64
 /* 64-bit classic hash table MMU */
 # include <asm/mmu-hash64.h>
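A sketch of how C code is meant to consume these bits at runtime; the surrounding function is hypothetical, only mmu_has_feature() and the MMU_FTR_* constants come from the header above:

/* Hypothetical example of keying behaviour off MMU feature bits. */
static void example_tlb_flush_choice(void)
{
	if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
		/* broadcast invalidation available */
		if (mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL)) {
			/* core can't overlap broadcasts: serialise them */
		}
	} else {
		/* fall back to IPI-based flushing */
	}
}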
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6b993ef452ff..ab4f19263c42 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -2,237 +2,26 @@
 #define __ASM_POWERPC_MMU_CONTEXT_H
 #ifdef __KERNEL__

+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
 #include <asm-generic/mm_hooks.h>
-
-#ifndef CONFIG_PPC64
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-
-/*
- * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
- * (virtual segment identifiers) for each context.  Although the
- * hardware supports 24-bit VSIDs, and thus >1 million contexts,
- * we only use 32,768 of them.  That is ample, since there can be
- * at most around 30,000 tasks in the system anyway, and it means
- * that we can use a bitmap to indicate which contexts are in use.
- * Using a bitmap means that we entirely avoid all of the problems
- * that we used to have when the context number overflowed,
- * particularly on SMP systems.
- *  -- paulus.
- */
-
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs).  We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table.  Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- */
-#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
-				 & 0xffffff)
-
-/*
-   The MPC8xx has only 16 contexts.  We rotate through them on each
-   task switch.  A better way would be to keep track of tasks that
-   own contexts, and implement an LRU usage.  That way very active
-   tasks don't always have to pay the TLB reload overhead.  The
-   kernel pages are mapped shared, so the kernel can run on behalf
-   of any task that makes a kernel entry.  Shared does not mean they
-   are not protected, just that the ASID comparison is not performed.
-        -- Dan
-
-   The IBM4xx has 256 contexts, so we can just rotate through these
-   as a way of "switching" contexts.  If the TID of the TLB is zero,
-   the PID/TID comparison is disabled, so we can use a TID of zero
-   to represent all kernel pages as shared among all contexts.
-        -- Dan
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#ifdef CONFIG_8xx
-#define NO_CONTEXT	16
-#define LAST_CONTEXT	15
-#define FIRST_CONTEXT	0
-
-#elif defined(CONFIG_4xx)
-#define NO_CONTEXT	256
-#define LAST_CONTEXT	255
-#define FIRST_CONTEXT	1
-
-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
-#define NO_CONTEXT	256
-#define LAST_CONTEXT	255
-#define FIRST_CONTEXT	1
-
-#else
-
-/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT	((unsigned long) -1)
-#define LAST_CONTEXT	32767
-#define FIRST_CONTEXT	1
-#endif
-
-/*
- * Set the current MMU context.
- * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
- * loading up the segment registers for the user part of the address space.
- *
- * Since the PGD is immediately available, it is much faster to simply
- * pass this along as a second parameter, which is required for 8xx and
- * can be used for debugging on all processors (if you happen to have
- * an Abatron).
- */
-extern void set_context(unsigned long contextid, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern unsigned long next_mmu_context;
-
-/*
- * If we don't have sufficient contexts to give one to every task
- * that could be in the system, we need to be able to steal contexts.
- * These variables support that.
- */
-#if LAST_CONTEXT < 30000
-#define FEW_CONTEXTS	1
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern void steal_context(void);
-#endif
-
-/*
- * Get a new mmu context for the address space described by `mm'.
- */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-	unsigned long ctx;
-
-	if (mm->context.id != NO_CONTEXT)
-		return;
-#ifdef FEW_CONTEXTS
-	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
-		steal_context();
-#endif
-	ctx = next_mmu_context;
-	while (test_and_set_bit(ctx, context_map)) {
-		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
-		if (ctx > LAST_CONTEXT)
-			ctx = 0;
-	}
-	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-	mm->context.id = ctx;
-#ifdef FEW_CONTEXTS
-	context_mm[ctx] = mm;
-#endif
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
-{
-	mm->context.id = NO_CONTEXT;
-	return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	preempt_disable();
-	if (mm->context.id != NO_CONTEXT) {
-		clear_bit(mm->context.id, context_map);
-		mm->context.id = NO_CONTEXT;
-#ifdef FEW_CONTEXTS
-		atomic_inc(&nr_free_contexts);
-#endif
-	}
-	preempt_enable();
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-#ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-	asm volatile ("dssall;\n"
-#ifndef CONFIG_POWER4
-	"sync;\n" /* G4 needs a sync here, G5 apparently not */
-#endif
-	 : : );
-#endif /* CONFIG_ALTIVEC */
-
-	tsk->thread.pgdir = next->pgd;
-
-	/* No need to flush userspace segments if the mm doesnt change */
-	if (prev == next)
-		return;
-
-	/* Setup new userspace context */
-	get_mmu_context(next);
-	set_context(next->context.id, next->pgd);
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
+#include <asm/cputhreads.h>

 /*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
+ * Most if the context management is out of line
  */
-#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
-
 extern void mmu_context_init(void);
-
-
-#else
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk)
-{
-}
-
-/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
- */
-#define NO_CONTEXT	0
-#define MAX_CONTEXT	((1UL << 19) - 1)
-
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);

+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);

 /*
  * switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
-		cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	/* Mark this context has been used on the new CPU */
+	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+	/* 32-bit keeps track of the current PGDIR in the thread struct */
+#ifdef CONFIG_PPC32
+	tsk->thread.pgdir = next->pgd;
+#endif /* CONFIG_PPC32 */

-	/* No need to flush userspace segments if the mm doesnt change */
+	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;

+	/* We must stop all altivec streams before changing the HW
+	 * context
+	 */
 #ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		asm volatile ("dssall");
 #endif /* CONFIG_ALTIVEC */

+	/* The actual HW switching method differs between the various
+	 * sub architectures.
+	 */
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (cpu_has_feature(CPU_FTR_SLB))
 		switch_slb(tsk, next);
 	else
 		switch_stab(tsk, next);
+#else
+	/* Out of line for now */
+	switch_mmu_context(prev, next);
+#endif
+
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }

-#endif /* CONFIG_PPC64 */
+/* We don't currently use enter_lazy_tlb() for anything */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+				  struct task_struct *tsk)
+{
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81ef10b6b672..81a23932a160 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -239,6 +239,25 @@ struct mpc52xx_cdm {
 	u16	mclken_div_psc6;	/* CDM + 0x36  reg13 byte2,3 */
 };

+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+	u32 per_mask;		/* INTR + 0x00 */
+	u32 per_pri1;		/* INTR + 0x04 */
+	u32 per_pri2;		/* INTR + 0x08 */
+	u32 per_pri3;		/* INTR + 0x0c */
+	u32 ctrl;		/* INTR + 0x10 */
+	u32 main_mask;		/* INTR + 0x14 */
+	u32 main_pri1;		/* INTR + 0x18 */
+	u32 main_pri2;		/* INTR + 0x1c */
+	u32 reserved1;		/* INTR + 0x20 */
+	u32 enc_status;		/* INTR + 0x24 */
+	u32 crit_status;	/* INTR + 0x28 */
+	u32 main_status;	/* INTR + 0x2c */
+	u32 per_status;		/* INTR + 0x30 */
+	u32 reserved2;		/* INTR + 0x34 */
+	u32 per_error;		/* INTR + 0x38 */
+};
+
 #endif /* __ASSEMBLY__ */


diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 8917ed630565..a218da6bec7c 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -68,12 +68,20 @@
 #define MPC52xx_PSC_IMR_ORERR		0x1000
 #define MPC52xx_PSC_IMR_IPC		0x8000

-/* PSC input port change bit */
+/* PSC input port change bits */
 #define MPC52xx_PSC_CTS			0x01
 #define MPC52xx_PSC_DCD			0x02
 #define MPC52xx_PSC_D_CTS		0x10
 #define MPC52xx_PSC_D_DCD		0x20

+/* PSC acr bits */
+#define MPC52xx_PSC_IEC_CTS		0x01
+#define MPC52xx_PSC_IEC_DCD		0x02
+
+/* PSC output port bits */
+#define MPC52xx_PSC_OP_RTS		0x01
+#define MPC52xx_PSC_OP_RES		0x02
+
 /* PSC mode fields */
 #define MPC52xx_PSC_MODE_5_BITS		0x00
 #define MPC52xx_PSC_MODE_6_BITS		0x01
@@ -91,6 +99,7 @@
 #define MPC52xx_PSC_MODE_ONE_STOP_5_BITS	0x00
 #define MPC52xx_PSC_MODE_ONE_STOP		0x07
 #define MPC52xx_PSC_MODE_TWO_STOP		0x0f
+#define MPC52xx_PSC_MODE_TXCTS			0x10

 #define MPC52xx_PSC_RFNUM_MASK	0x01ff

diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 458c1f7fbc18..dabc01c727b8 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -1,9 +1,134 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
+ */
+#ifndef _ASM_POWERPC_MUTEX_H
+#define _ASM_POWERPC_MUTEX_H
+
+static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
+{
+	int t;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%1		# mutex trylock\n\
+	cmpw	0,%0,%2\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%3,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return t;
+}
+
+static inline int __mutex_dec_return_lock(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%1		# mutex lock\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static inline int __mutex_inc_return_unlock(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	LWSYNC_ON_SMP
+"1:	lwarx	%0,0,%1		# mutex unlock\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b"
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_dec_return_lock(count) < 0))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_dec_return_lock(count) < 0))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()		1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
  *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Change the count from 1 to 0, and return 1 (success), or if the count
+ * was not 1, then return 0 (failure).
  */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+		return 1;
+	return 0;
+}

-#include <asm-generic/mutex-dec.h>
+#endif
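The lock-word protocol implemented above (1 = unlocked, 0 = locked, negative = locked with waiters, per asm-generic/mutex-dec.h) can be sketched portably with C11 atomics. This illustrates only the counting scheme, not the lwarx/stwcx. reservations or barriers; the slowpath bodies are stubs:

#include <stdatomic.h>

/* Illustration of the mutex-dec counting protocol. */
static atomic_int count = 1;

static void slowpath_lock(void)   { /* block until the owner unlocks */ }
static void slowpath_unlock(void) { /* wake one waiter */ }

static void lock_sketch(void)
{
	/* mirrors __mutex_fastpath_lock(): decrement and test */
	if (atomic_fetch_sub(&count, 1) - 1 < 0)
		slowpath_lock();
}

static void unlock_sketch(void)
{
	/* mirrors __mutex_fastpath_unlock(): increment and test */
	if (atomic_fetch_add(&count, 1) + 1 <= 0)
		slowpath_unlock();
}

static int trylock_sketch(void)
{
	/* mirrors __mutex_fastpath_trylock(): cmpxchg 1 -> 0 */
	int expected = 1;
	return atomic_compare_exchange_strong(&count, &expected, 0);
}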
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c0b8d4a29a91..197d569f5bd3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,15 @@
 #include <asm/kdump.h>

 /*
- * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
+ * on PPC44x). For PPC64 we support either 4K or 64K software
  * page size. When using 64K pages however, whether we are really supporting
  * 64K pages in HW or not is irrelevant to those definitions.
  */
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES)
 #define PAGE_SHIFT		16
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PAGE_SHIFT		14
 #else
 #define PAGE_SHIFT		12
 #endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
 /* 64k pages additionally define a bigger "real PTE" type that gathers
  * the "second half" part of the PTE for pseudo 64k pages
  */
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
 typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
 #define pte_val(x)	(x)
 #define __pte(x)	(x)

-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
-typedef unsigned long real_pte_t;
+typedef pte_t real_pte_t;
 #endif


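The PAGE_SHIFT selection above, in plain numbers (a trivial stand-alone check, illustrative only):

/* The three supported page-size configurations. */
#include <stdio.h>

int main(void)
{
	unsigned int shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K configs */

	for (unsigned int i = 0; i < 3; i++)
		printf("PAGE_SHIFT %2u -> PAGE_SIZE %2lu KB\n",
		       shifts[i], (1UL << shifts[i]) >> 10);
	return 0;
}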
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d77072a32cc6..1458d9500381 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,6 +19,8 @@
 #define PTE_FLAGS_OFFSET	0
 #endif

+#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
-#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
 #else
 typedef unsigned long pte_basic_t;
-#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
 #endif

 struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);

 #include <asm-generic/page.h>

+#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_POWERPC_PAGE_32_H */
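The consolidation works because __builtin_ffs(sizeof(pte_t)) - 1 is log2 of the PTE size, so PAGE_SHIFT - PTE_T_LOG2 reproduces the two old constants. A quick sanity check outside the kernel (ffs() from strings.h stands in for the builtin):

/* Sanity check of the PTE_SHIFT consolidation. */
#include <stdio.h>
#include <strings.h>

static unsigned int pte_shift(unsigned int page_shift, unsigned int pte_bytes)
{
	unsigned int pte_t_log2 = (unsigned int)ffs((int)pte_bytes) - 1;
	return page_shift - pte_t_log2;	/* log2(ptes per page) */
}

int main(void)
{
	/* 4K pages: 8-byte PTEs -> 512/page, 4-byte PTEs -> 1024/page */
	printf("64-bit PTEs: %u ptes/page\n", 1U << pte_shift(12, 8));
	printf("32-bit PTEs: %u ptes/page\n", 1U << pte_shift(12, 4));
	return 0;
}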
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 9047af7baa69..84007afabdb5 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -13,7 +13,6 @@

 struct device_node;

-extern unsigned int ppc_pci_flags;
 enum {
 	/* Force re-assigning all resources (ignore firmware
 	 * setup completely)
@@ -36,6 +35,31 @@ enum {
 	/* ... except for domain 0 */
 	PPC_PCI_COMPAT_DOMAIN_0		= 0x00000020,
 };
+#ifdef CONFIG_PCI
+extern unsigned int ppc_pci_flags;
+
+static inline void ppc_pci_set_flags(int flags)
+{
+	ppc_pci_flags = flags;
+}
+
+static inline void ppc_pci_add_flags(int flags)
+{
+	ppc_pci_flags |= flags;
+}
+
+static inline int ppc_pci_has_flag(int flag)
+{
+	return (ppc_pci_flags & flag);
+}
+#else
+static inline void ppc_pci_set_flags(int flags) { }
+static inline void ppc_pci_add_flags(int flags) { }
+static inline int ppc_pci_has_flag(int flag)
+{
+	return 0;
+}
+#endif


 /*
@@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);

 /** Discover new pci devices under this bus, and add them */
 extern void pcibios_add_pci_devices(struct pci_bus *bus);
-extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
-
-extern int pcibios_remove_root_bus(struct pci_controller *phb);

 static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
 {
@@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 /* Allocate & free a PCI host bridge structure */
 extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_setup_phb_resources(struct pci_controller *hose);

 #ifdef CONFIG_PCI
 extern unsigned long pci_address_to_pio(phys_addr_t address);
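A sketch of how platform code is expected to use the new accessors rather than poking ppc_pci_flags directly; the surrounding function is hypothetical, only the accessors and flag names come from the header:

/* Hypothetical platform setup using the flag accessors; they compile
 * away to no-ops when CONFIG_PCI is not set. */
static void example_platform_pci_setup(void)
{
	/* ignore firmware resource assignment on this board */
	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_RSRC);

	if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS)) {
		/* bus numbers will be renumbered during probe */
	}
}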
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 57a2a494886b..3548159a1beb 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -38,8 +38,8 @@ struct pci_dev;
38 * Set this to 1 if you want the kernel to re-assign all PCI 38 * Set this to 1 if you want the kernel to re-assign all PCI
39 * bus numbers (don't do that on ppc64 yet !) 39 * bus numbers (don't do that on ppc64 yet !)
40 */ 40 */
41#define pcibios_assign_all_busses() (ppc_pci_flags & \ 41#define pcibios_assign_all_busses() \
42 PPC_PCI_REASSIGN_ALL_BUS) 42 (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
43#define pcibios_scan_all_fns(a, b) 0 43#define pcibios_scan_all_fns(a, b) 0
44 44
45static inline void pcibios_set_master(struct pci_dev *dev) 45static inline void pcibios_set_master(struct pci_dev *dev)
@@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
 	return root;
 }

-extern void pcibios_setup_new_device(struct pci_dev *dev);
-
 extern void pcibios_claim_one_bus(struct pci_bus *b);

-extern void pcibios_allocate_bus_resources(struct pci_bus *bus);
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);

 extern void pcibios_resource_survey(void);

 extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);

 extern struct pci_dev *of_create_pci_dev(struct device_node *node,
 					struct pci_bus *bus, int devfn);
@@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node,
 			       struct pci_dev *dev);

 extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);

 extern int pci_read_irq_line(struct pci_dev *dev);

@@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
 				 const struct resource *rsrc,
 				 resource_size_t *start, resource_size_t *end);

-extern void pcibios_do_bus_setup(struct pci_bus *bus);
-extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
-
+extern void pcibios_setup_bus_devices(struct pci_bus *bus);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);

 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 58c07147b3ea..0815eb40acae 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@

 #include <linux/threads.h>

+#define PTE_NONCACHE_NUM	0  /* dummy for now to share code w/ppc64 */
+
 extern void __bad_pte(pmd_t *pmd);

 extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t pte);

-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+
+	free_page((unsigned long)p);
+}

 #define check_pgt_cache()	do { } while (0)

diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 812a1d8f35cb..afda2bdd860f 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
  * 2 of the License, or (at your option) any later version.
  */

-#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	return page;
 }

-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
-#define PGF_CACHENUM_MASK	0x7
-
-typedef struct pgtable_free {
-	unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
-						unsigned long mask)
-{
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
 static inline void pgtable_free(pgtable_free_t pgf)
 {
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
 		kmem_cache_free(pgtable_cache[cachenum], p);
 }

-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb,ptepage)	\
-do { \
-	pgtable_page_dtor(ptepage); \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index b4505ed0f0f2..5d8480265a77 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
 #define _ASM_POWERPC_PGALLOC_H
 #ifdef __KERNEL__

+#include <linux/mm.h>
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+typedef struct pgtable_free {
+	unsigned long val;
+} pgtable_free_t;
+
+#define PGF_CACHENUM_MASK	0x7
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+						unsigned long mask)
+{
+	BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
 #ifdef CONFIG_PPC64
 #include <asm/pgalloc-64.h>
 #else
 #include <asm/pgalloc-32.h>
 #endif

+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#ifdef CONFIG_SMP
+#define __pte_free_tlb(tlb,ptepage)	\
+do { \
+	pgtable_page_dtor(ptepage); \
+	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#else
+#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#endif
+
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */
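The pgtable_free_t trick relies on page-table allocations being aligned well past 8 bytes, so a small cache index can ride in the low three bits of the pointer. A simplified stand-alone illustration of the packing (it asserts alignment instead of masking with the table size, so it is not a drop-in equivalent of pgtable_free_cache()):

/* Packing a small index into the low bits of an aligned pointer. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHENUM_MASK	0x7UL

static unsigned long pack(void *p, unsigned long cachenum)
{
	assert(cachenum <= CACHENUM_MASK);
	assert(((unsigned long)p & CACHENUM_MASK) == 0);	/* aligned */
	return (unsigned long)p | cachenum;
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page-table stand-in */
	unsigned long packed = pack(table, 5);

	printf("ptr %p, cache %lu\n",
	       (void *)(packed & ~CACHENUM_MASK), packed & CACHENUM_MASK);
	free(table);
	return 0;
}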
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6ab7c67cb5ab..f69a4d977729 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
228 * - FILE *must* be in the bottom three bits because swap cache 228 * - FILE *must* be in the bottom three bits because swap cache
229 * entries use the top 29 bits for TLB2. 229 * entries use the top 29 bits for TLB2.
230 * 230 *
231 * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it 231 * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
232 * doesn't support SMP. So we can use this as software bit, like 232 * because it doesn't support SMP. However, some later 460 variants
233 * DIRTY. 233 * have -some- form of SMP support and so I keep the bit there for
234 * future use
234 * 235 *
235 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used 236 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
236 * for memory protection related functions (see PTE structure in 237 * for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
 				 _PAGE_USER | _PAGE_ACCESSED | \
 				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
 				 _PAGE_EXEC | _PAGE_HWEXEC)
+
 /*
- * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
- * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
- * to have it in the Linux PTE, and in fact the bit could be reused for
- * another purpose.  -- paulus.
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
  */
-
-#ifdef CONFIG_44x
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
 #else
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #endif
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
+
 #define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
 #define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

 #ifdef CONFIG_PPC_STD_MMU
 /* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
 #endif

-#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
 #define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

-static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
 static inline pte_t pte_wrprotect(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 #endif
 }

+
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
 	WARN_ON(pte_present(*ptep));
 #endif
 	__set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	__changed;							      \
 })

-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4c0a8c62859d..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -100,7 +100,7 @@
100 100
101#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) 101#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
102 102
103/* __pgprot defined in arch/powerpc/incliude/asm/page.h */ 103/* __pgprot defined in arch/powerpc/include/asm/page.h */
104#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 104#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
105 105
106#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) 106#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
 static inline pte_t pte_wrprotect(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_RW); return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	__changed;							     \
 })

-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dbb8ca172e44..07f55e601696 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -16,6 +16,32 @@ struct mm_struct;
 #endif

 #ifndef __ASSEMBLY__
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+				            _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
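A sketch of the intended use: the new macros first clear the whole cache-control group and then set the desired combination, so callers no longer accumulate stale bits. A hypothetical driver snippet mapping a device BAR uncached-guarded:

/* Hypothetical mmap() path rewriting the cache-control bits. */
static int example_map_mmio(struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start,
				  vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}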
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c4a029ccb4d3..1a0d628eb114 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #define fromreal(rd)	tovirt(rd,rd)

 #define tophys(rd,rs)				\
-0:	addis	rd,rs,-KERNELBASE@h;		\
+0:	addis	rd,rs,-PAGE_OFFSET@h;		\
 	.section ".vtop_fixup","aw";		\
 	.align  1;				\
 	.long   0b;				\
 	.previous

 #define tovirt(rd,rs)				\
-0:	addis	rd,rs,KERNELBASE@h;		\
+0:	addis	rd,rs,PAGE_OFFSET@h;		\
 	.section ".ptov_fixup","aw";		\
 	.align  1;				\
 	.long   0b;				\
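The macros only adjust the upper halfword via addis, which works because the linear mapping is a constant, suitably aligned offset. In C terms the translation is just an add or subtract; a hedged sketch, assuming the common 32-bit PAGE_OFFSET of 0xc0000000 purely for illustration:

/* C-level equivalent of the tophys/tovirt adjustment. */
#define EXAMPLE_PAGE_OFFSET	0xc0000000UL	/* assumed value */

static inline unsigned long example_tophys(unsigned long va)
{
	return va - EXAMPLE_PAGE_OFFSET;
}

static inline unsigned long example_tovirt(unsigned long pa)
{
	return pa + EXAMPLE_PAGE_OFFSET;
}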
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 101ed87f7d84..d3466490104a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -69,8 +69,6 @@ extern int _prep_type;

 #ifdef __KERNEL__

-extern int have_of;
-
 struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
@@ -207,6 +205,11 @@ struct thread_struct {
 #define INIT_SP_LIMIT \
 	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)

+#ifdef CONFIG_SPE
+#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#else
+#define SPEFSCR_INIT
+#endif

 #ifdef CONFIG_PPC32
 #define INIT_THREAD { \
@@ -215,6 +218,7 @@ struct thread_struct {
 	.fs = KERNEL_DS, \
 	.pgdir = swapper_pg_dir, \
 	.fpexc_mode = MSR_FE0 | MSR_FE1, \
+	SPEFSCR_INIT \
 }
 #else
 #define INIT_THREAD  { \
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index eb3bd2e1c7f6..6ff04185d2aa 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void);
 /* CPU OF node matching */
 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
 
+/* cache lookup */
+struct device_node *of_find_next_cache_node(struct device_node *np);
+
 /* Get the MAC address */
 extern const void *of_get_mac_address(struct device_node *np);
 
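
A plausible caller walks from a CPU node up through successive cache levels. A hedged sketch, assuming of_find_next_cache_node() returns the next cache device node with a reference held, or NULL at the top of the hierarchy:

#include <linux/of.h>

static void walk_cpu_caches(int cpu)
{
	struct device_node *np = of_get_cpu_node(cpu, NULL);
	struct device_node *cache;

	while (np) {
		cache = of_find_next_cache_node(np);
		of_node_put(np);
		np = cache;		/* NULL ends the loop */
	}
}
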
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index f9e34c493cbb..cff30c0ef1ff 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -305,30 +305,34 @@ static inline const char* ps3_result(int result)
 /* system bus routines */
 
 enum ps3_match_id {
 	PS3_MATCH_ID_EHCI		= 1,
 	PS3_MATCH_ID_OHCI		= 2,
 	PS3_MATCH_ID_GELIC		= 3,
 	PS3_MATCH_ID_AV_SETTINGS	= 4,
 	PS3_MATCH_ID_SYSTEM_MANAGER	= 5,
 	PS3_MATCH_ID_STOR_DISK		= 6,
 	PS3_MATCH_ID_STOR_ROM		= 7,
 	PS3_MATCH_ID_STOR_FLASH		= 8,
 	PS3_MATCH_ID_SOUND		= 9,
-	PS3_MATCH_ID_GRAPHICS		= 10,
+	PS3_MATCH_ID_GPU		= 10,
 	PS3_MATCH_ID_LPM		= 11,
 };
 
-#define PS3_MODULE_ALIAS_EHCI		"ps3:1"
-#define PS3_MODULE_ALIAS_OHCI		"ps3:2"
-#define PS3_MODULE_ALIAS_GELIC		"ps3:3"
-#define PS3_MODULE_ALIAS_AV_SETTINGS	"ps3:4"
-#define PS3_MODULE_ALIAS_SYSTEM_MANAGER	"ps3:5"
-#define PS3_MODULE_ALIAS_STOR_DISK	"ps3:6"
-#define PS3_MODULE_ALIAS_STOR_ROM	"ps3:7"
-#define PS3_MODULE_ALIAS_STOR_FLASH	"ps3:8"
-#define PS3_MODULE_ALIAS_SOUND		"ps3:9"
-#define PS3_MODULE_ALIAS_GRAPHICS	"ps3:10"
-#define PS3_MODULE_ALIAS_LPM		"ps3:11"
+enum ps3_match_sub_id {
+	PS3_MATCH_SUB_ID_GPU_FB		= 1,
+};
+
+#define PS3_MODULE_ALIAS_EHCI		"ps3:1:0"
+#define PS3_MODULE_ALIAS_OHCI		"ps3:2:0"
+#define PS3_MODULE_ALIAS_GELIC		"ps3:3:0"
+#define PS3_MODULE_ALIAS_AV_SETTINGS	"ps3:4:0"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER	"ps3:5:0"
+#define PS3_MODULE_ALIAS_STOR_DISK	"ps3:6:0"
+#define PS3_MODULE_ALIAS_STOR_ROM	"ps3:7:0"
+#define PS3_MODULE_ALIAS_STOR_FLASH	"ps3:8:0"
+#define PS3_MODULE_ALIAS_SOUND		"ps3:9:0"
+#define PS3_MODULE_ALIAS_GPU_FB		"ps3:10:1"
+#define PS3_MODULE_ALIAS_LPM		"ps3:11:0"
 
 enum ps3_system_bus_device_type {
 	PS3_DEVICE_TYPE_IOC0 = 1,
@@ -337,11 +341,6 @@ enum ps3_system_bus_device_type {
 	PS3_DEVICE_TYPE_LPM,
 };
 
-enum ps3_match_sub_id {
-	/* for PS3_MATCH_ID_GRAPHICS */
-	PS3_MATCH_SUB_ID_FB		= 1,
-};
-
 /**
  * struct ps3_system_bus_device - a device on the system bus
  */
@@ -516,4 +515,7 @@ void ps3_sync_irq(int node);
 u32 ps3_get_hw_thread_id(int cpu);
 u64 ps3_get_spe_id(void *arg);
 
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
 #endif
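
The module aliases now encode both the match id and the sub id as "ps3:<match_id>:<match_sub_id>", so a driver can bind to one sub-function of a device (here, the framebuffer face of the GPU). A sketch of how a driver module would advertise itself with the new format:

#include <linux/module.h>
#include <asm/ps3.h>

MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_FB);	/* expands to "ps3:10:1" */
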
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 5aa22cffdbd6..cd24ac16660a 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int);
 extern int ps3av_audio_mute_analog(int);
 extern int ps3av_dev_open(void);
 extern int ps3av_dev_close(void);
-extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
-				    void *flip_data);
-extern void ps3av_flip_ctl(int on);
-
 #endif	/* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c6d1ab650778..f484a343efba 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value);
 #define __get_SP()	({unsigned long sp; \
 			asm volatile("mr %0,1": "=r" (sp)); sp;})
 
+struct pt_regs;
+
+extern void ppc_save_regs(struct pt_regs *regs);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 8eaa7b28d9d0..e0175beb4462 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -168,6 +168,7 @@ extern void rtas_os_term(char *str);
 extern int rtas_get_sensor(int sensor, int index, int *state);
 extern int rtas_get_power_level(int powerdomain, int *level);
 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern bool rtas_indicator_present(int token, int *maxindex);
 extern int rtas_set_indicator(int indicator, int index, int new_value);
 extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
 extern void rtas_progress(char *s, unsigned short hex);
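
rtas_indicator_present() lets a caller probe whether the firmware actually exports a given indicator token before driving it. A hedged usage sketch; TOKEN_EXAMPLE is a placeholder, not a real RTAS token:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/rtas.h>

#define TOKEN_EXAMPLE	9005	/* hypothetical indicator token */

static int set_example_indicator(int value)
{
	int maxindex;

	if (!rtas_indicator_present(TOKEN_EXAMPLE, &maxindex))
		return -ENODEV;

	return rtas_set_indicator(TOKEN_EXAMPLE, 0, value);
}
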
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index ced34f1dc8f8..3d9f831c3c55 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -82,7 +82,7 @@
 #define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
 #define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
 
-#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
 #define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
 
 /* These macros define what NaN looks like. They're supposed to expand to
@@ -97,6 +97,20 @@
 
 #define _FP_KEEPNANFRACP 1
 
+#ifdef FP_EX_BOOKE_E500_SPE
+#define FP_EX_INEXACT		(1 << 21)
+#define FP_EX_INVALID		(1 << 20)
+#define FP_EX_DIVZERO		(1 << 19)
+#define FP_EX_UNDERFLOW		(1 << 18)
+#define FP_EX_OVERFLOW		(1 << 17)
+#define FP_INHIBIT_RESULTS	0
+
+#define __FPU_FPSCR	(current->thread.spefscr)
+#define __FPU_ENABLED_EXC		\
+({					\
+	(__FPU_FPSCR >> 2) & 0x1f;	\
+})
+#else
 /* Exception flags.  We use the bit positions of the appropriate bits
    in the FPSCR, which also correspond to the FE_* bits.  This makes
    everything easier ;-).  */
@@ -111,22 +125,6 @@
 #define FP_EX_DIVZERO		(1 << (31 - 5))
 #define FP_EX_INEXACT		(1 << (31 - 6))
 
-/* This macro appears to be called when both X and Y are NaNs, and
- * has to choose one and copy it to R. i386 goes for the larger of the
- * two, sparc64 just picks Y. I don't understand this at all so I'll
- * go with sparc64 because it's shorter :->   -- PMM
- */
-#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
-  do {								\
-    R##_s = Y##_s;						\
-    _FP_FRAC_COPY_##wc(R,Y);					\
-    R##_c = FP_CLS_NAN;						\
-  } while (0)
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
 #define __FPU_FPSCR	(current->thread.fpscr.val)
 
 /* We only actually write to the destination register
@@ -137,6 +135,32 @@
 	(__FPU_FPSCR >> 3) & 0x1f;	\
 })
 
+#endif
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose X.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+  do {								\
+    if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)		\
+	&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs))	\
+      {								\
+	R##_s = X##_s;						\
+	_FP_FRAC_COPY_##wc(R,X);				\
+      }								\
+    else							\
+      {								\
+	R##_s = Y##_s;						\
+	_FP_FRAC_COPY_##wc(R,Y);				\
+      }								\
+    else							\
+  } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
 #define __FPU_TRAP_P(bits)	\
 	((__FPU_ENABLED_EXC & (bits)) != 0)
 
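
The new _FP_CHOOSENAN picks X only when Y's quiet bit is set and X's is clear, i.e. it prefers a signaling NaN so its payload propagates; in every other case Y wins. A plain-C restatement for a single 64-bit fraction word (choose_nan and qnan_bit are illustrative names, not kernel API):

#include <stdint.h>

static uint64_t choose_nan(uint64_t x_frac, uint64_t y_frac,
			   uint64_t qnan_bit)
{
	/* Pick X only when Y is quiet and X is signaling; otherwise Y. */
	if ((y_frac & qnan_bit) && !(x_frac & qnan_bit))
		return x_frac;
	return y_frac;
}
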
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1866cec4f967..c25f73d1d842 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu);
 #define PPC_MSG_CALL_FUNC_SINGLE	2
 #define PPC_MSG_DEBUGGER_BREAK		3
 
+/*
+ * irq controllers that have dedicated ipis per message and don't
+ * need additional code in the action handler may use this
+ */
+extern int smp_request_message_ipi(int virq, int message);
+extern const char *smp_ipi_name[];
+
 void smp_init_iSeries(void);
 void smp_init_pSeries(void);
 void smp_init_cell(void);
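
An irq controller that maps each IPI message onto its own virq can hand all of them to the generic handler in a loop. A hedged sketch; msg_virq[] is an illustrative array of already-mapped virqs, and messages run 0..PPC_MSG_DEBUGGER_BREAK:

#include <asm/smp.h>

static int register_ipis(int msg_virq[4])
{
	int msg, rc;

	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++) {
		rc = smp_request_message_ipi(msg_virq[msg], msg);
		if (rc)
			return rc;
	}
	return 0;
}
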
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f56a843f4705..36864364e601 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	bne-		1b"
 	: "=&r"(tmp)
 	: "r"(&rw->lock)
-	: "cr0", "memory");
+	: "cr0", "xer", "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 45963e80f557..28f6ddbff4cf 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,6 +5,10 @@
 #include <linux/stringify.h>
 #include <asm/feature-fixups.h>
 
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
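
__SUBARCH_HAS_LWSYNC centralizes the "is lwsync usable here" decision so consumers choose the barrier instruction in one place. An illustrative sketch of how a LWSYNC macro might key off it (the exact definition lives elsewhere in the tree; treat this as an assumption):

#ifdef __SUBARCH_HAS_LWSYNC
#define LWSYNC	lwsync
#else
#define LWSYNC	sync	/* fall back to the full barrier */
#endif
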
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index d6648c143322..2a4be19a92c4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -23,15 +23,17 @@
  * read_barrier_depends() prevents data-dependent loads being reordered
  * across this point (nop on PPC).
  *
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores.  Lwsync is fine for
- * rmb(), though. Note that rmb() actually uses a sync on 32-bit
- * architectures.
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
  *
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
- * on SMP since it is only used to order updates to system memory.
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores.  Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
  */
 #define mb()   __asm__ __volatile__ ("sync" : : : "memory")
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
 #ifdef CONFIG_SMP
 
 #ifdef __SUBARCH_HAS_LWSYNC
-#    define SMPWMB      lwsync
+#    define SMPWMB      LWSYNC
 #else
 #    define SMPWMB      eieio
 #endif
 
 #define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
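
Why lwsync now suffices for smp_rmb() and smp_wmb(): the smp_ barriers only order cacheable accesses against each other, which is exactly the classic publish/consume pattern. A sketch (not kernel code, just the pattern the comment describes):

#include <asm/system.h>

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;
	smp_wmb();	/* order the data store before the flag store */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;
	smp_rmb();	/* order the flag load before the data load */
	return payload;
}
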
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index febd581ec9b0..27ccb764fdab 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq;
 extern unsigned long ppc_tb_freq;
 #define DEFAULT_TB_FREQ		125000000UL
 
-/*
- * By putting all of this stuff into a single struct we
- * reduce the number of cache lines touched by do_gettimeofday.
- * Both by collecting all of the data in one cache line and
- * by touching only one TOC entry on ppc64.
- */
-struct gettimeofday_vars {
-	u64 tb_to_xs;
-	u64 stamp_xsec;
-	u64 tb_orig_stamp;
-};
-
-struct gettimeofday_struct {
-	unsigned long tb_ticks_per_sec;
-	struct gettimeofday_vars vars[2];
-	struct gettimeofday_vars * volatile varp;
-	unsigned var_idx;
-	unsigned tb_to_us;
-};
-
 struct div_result {
 	u64 result_high;
 	u64 result_low;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index a2c6bfd85fb7..abbe3419d1dd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,6 +6,9 @@
  *
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - local_flush_tlb_mm(mm) flushes the specified mm context on
+ *                           the local processor
+ *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
  *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -17,7 +20,7 @@
  */
 #ifdef __KERNEL__
 
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PPC_MMU_NOHASH
 /*
  * TLB flushing for software loaded TLB chips
  *
@@ -28,63 +31,49 @@
 
 #include <linux/mm.h>
 
-extern void _tlbie(unsigned long address, unsigned int pid);
-extern void _tlbil_all(void);
-extern void _tlbil_pid(unsigned int pid);
-extern void _tlbil_va(unsigned long address, unsigned int pid);
+#define MMU_NO_CONTEXT		((unsigned int)-1)
 
-#if defined(CONFIG_40x) || defined(CONFIG_8xx)
-#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
-extern void _tlbia(void);
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	_tlbil_pid(mm->context.id);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long vmaddr)
-{
-	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
-}
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-{
-	flush_tlb_page(vma, vmaddr);
-}
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	_tlbil_pid(vma->vm_mm->context.id);
-}
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#else
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
+#endif
+#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	_tlbil_pid(0);
-}
+#elif defined(CONFIG_PPC_STD_MMU_32)
 
-#elif defined(CONFIG_PPC32)
 /*
- * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
+#elif defined(CONFIG_PPC_STD_MMU_64)
 
-#else
 /*
- * TLB flushing for 64-bit has-MMU CPUs
+ * TLB flushing for 64-bit hash-MMU CPUs
  */
 
 #include <linux/percpu.h>
@@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 extern void flush_hash_range(unsigned long number, int local);
 
 
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+					unsigned long vmaddr)
+{
+}
+
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
@@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 				     unsigned long end);
 
-
+#else
+#error Unsupported MMU type
 #endif
 
 #endif /*__KERNEL__ */
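
The new local_ variants let a caller that knows the mm is only live on the current CPU skip the cross-CPU shootdown; on UP, and on the hash MMUs where the flush is a no-op or already local, the global versions simply collapse onto them. A hedged sketch of the choice a caller makes (update_pte_and_flush and mm_is_local are illustrative, not kernel API):

#include <asm/tlbflush.h>

static void update_pte_and_flush(struct vm_area_struct *vma,
				 unsigned long addr, int mm_is_local)
{
	/* ... PTE modification would happen here ... */
	if (mm_is_local)
		local_flush_tlb_page(vma, addr);	/* this CPU only */
	else
		flush_tlb_page(vma, addr);	/* every CPU using the mm */
}
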
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 373fca394a54..375258559ae6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
 	return numa_cpumask_lookup_table[node];
 }
 
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }
 
 int of_node_to_nid(struct device_node *device);
@@ -46,6 +46,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 	 node_to_cpumask(pcibus_to_node(bus)) \
 	)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.parent			= NULL,			\
@@ -108,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 
 #define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
 #endif
 #endif
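
cpumask_of_node() hands back a pointer into the lookup table instead of copying a whole cpumask_t onto the stack, which is the point of the new cpumask API this merge brings in; iteration then goes through the generic helpers. A sketch of counting the online CPUs on a node (cpus_on_node is an illustrative helper):

#include <linux/cpumask.h>
#include <asm/topology.h>

static int cpus_on_node(int node)
{
	int cpu, n = 0;

	for_each_cpu(cpu, cpumask_of_node(node))
		if (cpu_online(cpu))
			n++;
	return n;
}
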
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index f01393224b52..13c2c283e178 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -39,6 +39,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/unistd.h>
+#include <linux/time.h>
 
 #define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)
 
@@ -83,6 +84,7 @@ struct vdso_data {
 	__u32 icache_log_block_size;		/* L1 i-cache log block size */
 	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
+	struct timespec stamp_xtime;		/* xtime as at tb_orig_stamp */
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls  */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
@@ -102,6 +104,7 @@ struct vdso_data {
 	__u32 tz_dsttime;			/* Type of dst correction	0x5C */
 	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
+	struct timespec stamp_xtime;		/* xtime as at tb_orig_stamp */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 dcache_block_size;		/* L1 d-cache block size     */
 	__u32 icache_block_size;		/* L1 i-cache block size     */
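
stamp_xtime caches xtime as it was at tb_orig_stamp, so the vDSO can serve CLOCK_REALTIME reads without a syscall: take the cached value and add the scaled timebase delta. A hedged sketch of that arithmetic, assuming the datapage also carries tb_orig_stamp and tb_ticks_per_sec (fields not shown in this hunk); overflow handling is omitted and the math is illustrative only:

#include <linux/time.h>
#include <asm/vdso_datapage.h>

static inline void vdso_read_realtime(const struct vdso_data *vd,
				      u64 tb_now, struct timespec *ts)
{
	u64 delta = tb_now - vd->tb_orig_stamp;	/* timebase ticks */

	*ts = vd->stamp_xtime;
	ts->tv_nsec += (delta * NSEC_PER_SEC) / vd->tb_ticks_per_sec;
	while (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}
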