Diffstat (limited to 'include/asm-powerpc')
54 files changed, 1143 insertions, 458 deletions
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index bca352e033c3..04ce8f8a2ee7 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -2,7 +2,6 @@ include include/asm-generic/Kbuild.asm | |||
2 | 2 | ||
3 | header-y += auxvec.h | 3 | header-y += auxvec.h |
4 | header-y += ioctls.h | 4 | header-y += ioctls.h |
5 | header-y += mman.h | ||
6 | header-y += sembuf.h | 5 | header-y += sembuf.h |
7 | header-y += siginfo.h | 6 | header-y += siginfo.h |
8 | header-y += stat.h | 7 | header-y += stat.h |
@@ -23,7 +22,6 @@ header-y += sigcontext.h | |||
23 | header-y += statfs.h | 22 | header-y += statfs.h |
24 | header-y += ps3fb.h | 23 | header-y += ps3fb.h |
25 | 24 | ||
26 | unifdef-y += asm-compat.h | ||
27 | unifdef-y += bootx.h | 25 | unifdef-y += bootx.h |
28 | unifdef-y += byteorder.h | 26 | unifdef-y += byteorder.h |
29 | unifdef-y += cputable.h | 27 | unifdef-y += cputable.h |
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index c19e7367fce6..8ec2e1da68bf 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -15,57 +15,6 @@ | |||
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | 17 | ||
18 | /* | ||
19 | * Feature section common macros | ||
20 | * | ||
21 | * Note that the entries now contain offsets between the table entry | ||
22 | * and the code rather than absolute code pointers in order to be | ||
23 | * useable with the vdso shared library. There is also an assumption | ||
24 | * that values will be negative, that is, the fixup table has to be | ||
25 | * located after the code it fixes up. | ||
26 | */ | ||
27 | #ifdef CONFIG_PPC64 | ||
28 | #ifdef __powerpc64__ | ||
29 | /* 64 bits kernel, 64 bits code */ | ||
30 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ | ||
31 | 99: \ | ||
32 | .section sect,"a"; \ | ||
33 | .align 3; \ | ||
34 | 98: \ | ||
35 | .llong msk; \ | ||
36 | .llong val; \ | ||
37 | .llong label##b-98b; \ | ||
38 | .llong 99b-98b; \ | ||
39 | .previous | ||
40 | #else /* __powerpc64__ */ | ||
41 | /* 64 bits kernel, 32 bits code (ie. vdso32) */ | ||
42 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ | ||
43 | 99: \ | ||
44 | .section sect,"a"; \ | ||
45 | .align 3; \ | ||
46 | 98: \ | ||
47 | .llong msk; \ | ||
48 | .llong val; \ | ||
49 | .long 0xffffffff; \ | ||
50 | .long label##b-98b; \ | ||
51 | .long 0xffffffff; \ | ||
52 | .long 99b-98b; \ | ||
53 | .previous | ||
54 | #endif /* !__powerpc64__ */ | ||
55 | #else /* CONFIG_PPC64 */ | ||
56 | /* 32 bits kernel, 32 bits code */ | ||
57 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ | ||
58 | 99: \ | ||
59 | .section sect,"a"; \ | ||
60 | .align 2; \ | ||
61 | 98: \ | ||
62 | .long msk; \ | ||
63 | .long val; \ | ||
64 | .long label##b-98b; \ | ||
65 | .long 99b-98b; \ | ||
66 | .previous | ||
67 | #endif /* !CONFIG_PPC64 */ | ||
68 | |||
69 | #ifdef __powerpc64__ | 18 | #ifdef __powerpc64__ |
70 | 19 | ||
71 | /* operations for longs and pointers */ | 20 | /* operations for longs and pointers */ |
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
index 53507046a1b1..81de6eb3455d 100644
--- a/include/asm-powerpc/cache.h
+++ b/include/asm-powerpc/cache.h
@@ -8,6 +8,9 @@ | |||
8 | #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) | 8 | #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) |
9 | #define L1_CACHE_SHIFT 4 | 9 | #define L1_CACHE_SHIFT 4 |
10 | #define MAX_COPY_PREFETCH 1 | 10 | #define MAX_COPY_PREFETCH 1 |
11 | #elif defined(CONFIG_PPC_E500MC) | ||
12 | #define L1_CACHE_SHIFT 6 | ||
13 | #define MAX_COPY_PREFETCH 4 | ||
11 | #elif defined(CONFIG_PPC32) | 14 | #elif defined(CONFIG_PPC32) |
12 | #define L1_CACHE_SHIFT 5 | 15 | #define L1_CACHE_SHIFT 5 |
13 | #define MAX_COPY_PREFETCH 4 | 16 | #define MAX_COPY_PREFETCH 4 |
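Side note on how the new value is consumed (a sketch, not part of the patch): cache.h derives L1_CACHE_BYTES from L1_CACHE_SHIFT, so the cacheline-alignment helpers pick up the 64-byte line size on e500mc automatically. The structure below is an invented example.

#include <linux/cache.h>	/* ____cacheline_aligned, pulls in asm/cache.h */

/* On e500mc this is now aligned to 64 bytes (1 << 6); on most other
 * 32-bit parts it stays at 32 bytes (1 << 5).
 */
struct rx_stats {
	unsigned long packets;
	unsigned long bytes;
} ____cacheline_aligned;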
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
new file mode 100644
index 000000000000..107d9b915e33
--- /dev/null
+++ b/include/asm-powerpc/code-patching.h
@@ -0,0 +1,54 @@ | |||
1 | #ifndef _ASM_POWERPC_CODE_PATCHING_H | ||
2 | #define _ASM_POWERPC_CODE_PATCHING_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 2008, Michael Ellerman, IBM Corporation. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <asm/types.h> | ||
14 | |||
15 | #define PPC_NOP_INSTR 0x60000000 | ||
16 | #define PPC_LWSYNC_INSTR 0x7c2004ac | ||
17 | |||
18 | /* Flags for create_branch: | ||
19 | * "b" == create_branch(addr, target, 0); | ||
20 | * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE); | ||
21 | * "bl" == create_branch(addr, target, BRANCH_SET_LINK); | ||
22 | * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK); | ||
23 | */ | ||
24 | #define BRANCH_SET_LINK 0x1 | ||
25 | #define BRANCH_ABSOLUTE 0x2 | ||
26 | |||
27 | unsigned int create_branch(const unsigned int *addr, | ||
28 | unsigned long target, int flags); | ||
29 | unsigned int create_cond_branch(const unsigned int *addr, | ||
30 | unsigned long target, int flags); | ||
31 | void patch_branch(unsigned int *addr, unsigned long target, int flags); | ||
32 | void patch_instruction(unsigned int *addr, unsigned int instr); | ||
33 | |||
34 | int instr_is_relative_branch(unsigned int instr); | ||
35 | int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); | ||
36 | unsigned long branch_target(const unsigned int *instr); | ||
37 | unsigned int translate_branch(const unsigned int *dest, | ||
38 | const unsigned int *src); | ||
39 | |||
40 | static inline unsigned long ppc_function_entry(void *func) | ||
41 | { | ||
42 | #ifdef CONFIG_PPC64 | ||
43 | /* | ||
44 | * On PPC64 the function pointer actually points to the function's | ||
45 | * descriptor. The first entry in the descriptor is the address | ||
46 | * of the function text. | ||
47 | */ | ||
48 | return ((func_descr_t *)func)->entry; | ||
49 | #else | ||
50 | return (unsigned long)func; | ||
51 | #endif | ||
52 | } | ||
53 | |||
54 | #endif /* _ASM_POWERPC_CODE_PATCHING_H */ | ||
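Usage sketch for the new API (not part of the patch; install_hook(), remove_hook(), my_handler() and the patch site are invented, and error handling is omitted):

#include <asm/code-patching.h>

extern unsigned int patch_site[];	/* hypothetical patchable instruction slot */
extern void my_handler(void);

static void install_hook(void)
{
	/* On ppc64 this resolves the function descriptor to the text address */
	unsigned long target = ppc_function_entry((void *)my_handler);

	/* Rewrite the slot to "bl my_handler"; target must be in branch range */
	patch_branch(patch_site, target, BRANCH_SET_LINK);
}

static void remove_hook(void)
{
	/* Turn the slot back into a no-op */
	patch_instruction(patch_site, PPC_NOP_INSTR);
}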
diff --git a/include/asm-powerpc/cpm.h b/include/asm-powerpc/cpm.h
index ede38ffe466a..63a55337c2de 100644
--- a/include/asm-powerpc/cpm.h
+++ b/include/asm-powerpc/cpm.h
@@ -96,6 +96,7 @@ unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); | |||
96 | int cpm_muram_free(unsigned long offset); | 96 | int cpm_muram_free(unsigned long offset); |
97 | unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); | 97 | unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); |
98 | void __iomem *cpm_muram_addr(unsigned long offset); | 98 | void __iomem *cpm_muram_addr(unsigned long offset); |
99 | unsigned long cpm_muram_offset(void __iomem *addr); | ||
99 | dma_addr_t cpm_muram_dma(void __iomem *addr); | 100 | dma_addr_t cpm_muram_dma(void __iomem *addr); |
100 | int cpm_command(u32 command, u8 opcode); | 101 | int cpm_command(u32 command, u8 opcode); |
101 | 102 | ||
diff --git a/include/asm-powerpc/cpm1.h b/include/asm-powerpc/cpm1.h
index 3df439678006..2ff798744c1d 100644
--- a/include/asm-powerpc/cpm1.h
+++ b/include/asm-powerpc/cpm1.h
@@ -42,35 +42,15 @@ | |||
42 | 42 | ||
43 | #define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4)) | 43 | #define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4)) |
44 | 44 | ||
45 | #ifndef CONFIG_PPC_CPM_NEW_BINDING | ||
46 | /* The dual ported RAM is multi-functional. Some areas can be (and are | ||
47 | * being) used for microcode. There is an area that can only be used | ||
48 | * as data ram for buffer descriptors, which is all we use right now. | ||
49 | * Currently the first 512 and last 256 bytes are used for microcode. | ||
50 | */ | ||
51 | #define CPM_DATAONLY_BASE ((uint)0x0800) | ||
52 | #define CPM_DATAONLY_SIZE ((uint)0x0700) | ||
53 | #define CPM_DP_NOSPACE ((uint)0x7fffffff) | ||
54 | #endif | ||
55 | |||
56 | /* Export the base address of the communication processor registers | 45 | /* Export the base address of the communication processor registers |
57 | * and dual port ram. | 46 | * and dual port ram. |
58 | */ | 47 | */ |
59 | extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */ | 48 | extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */ |
60 | 49 | ||
61 | #ifdef CONFIG_PPC_CPM_NEW_BINDING | ||
62 | #define cpm_dpalloc cpm_muram_alloc | 50 | #define cpm_dpalloc cpm_muram_alloc |
63 | #define cpm_dpfree cpm_muram_free | 51 | #define cpm_dpfree cpm_muram_free |
64 | #define cpm_dpram_addr cpm_muram_addr | 52 | #define cpm_dpram_addr cpm_muram_addr |
65 | #define cpm_dpram_phys cpm_muram_dma | 53 | #define cpm_dpram_phys cpm_muram_dma |
66 | #else | ||
67 | extern unsigned long cpm_dpalloc(uint size, uint align); | ||
68 | extern int cpm_dpfree(unsigned long offset); | ||
69 | extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align); | ||
70 | extern void cpm_dpdump(void); | ||
71 | extern void *cpm_dpram_addr(unsigned long offset); | ||
72 | extern uint cpm_dpram_phys(u8 *addr); | ||
73 | #endif | ||
74 | 54 | ||
75 | extern void cpm_setbrg(uint brg, uint rate); | 55 | extern void cpm_setbrg(uint brg, uint rate); |
76 | 56 | ||
diff --git a/include/asm-powerpc/cpm2.h b/include/asm-powerpc/cpm2.h
index 4c85ed9cd43f..2c7fd9cee291 100644
--- a/include/asm-powerpc/cpm2.h
+++ b/include/asm-powerpc/cpm2.h
@@ -78,24 +78,6 @@ | |||
78 | #define mk_cr_cmd(PG, SBC, MCN, OP) \ | 78 | #define mk_cr_cmd(PG, SBC, MCN, OP) \ |
79 | ((PG << 26) | (SBC << 21) | (MCN << 6) | OP) | 79 | ((PG << 26) | (SBC << 21) | (MCN << 6) | OP) |
80 | 80 | ||
81 | #ifndef CONFIG_PPC_CPM_NEW_BINDING | ||
82 | /* Dual Port RAM addresses. The first 16K is available for almost | ||
83 | * any CPM use, so we put the BDs there. The first 128 bytes are | ||
84 | * used for SMC1 and SMC2 parameter RAM, so we start allocating | ||
85 | * BDs above that. All of this must change when we start | ||
86 | * downloading RAM microcode. | ||
87 | */ | ||
88 | #define CPM_DATAONLY_BASE ((uint)128) | ||
89 | #define CPM_DP_NOSPACE ((uint)0x7fffffff) | ||
90 | #if defined(CONFIG_8272) || defined(CONFIG_MPC8555) | ||
91 | #define CPM_DATAONLY_SIZE ((uint)(8 * 1024) - CPM_DATAONLY_BASE) | ||
92 | #define CPM_FCC_SPECIAL_BASE ((uint)0x00009000) | ||
93 | #else | ||
94 | #define CPM_DATAONLY_SIZE ((uint)(16 * 1024) - CPM_DATAONLY_BASE) | ||
95 | #define CPM_FCC_SPECIAL_BASE ((uint)0x0000b000) | ||
96 | #endif | ||
97 | #endif | ||
98 | |||
99 | /* The number of pages of host memory we allocate for CPM. This is | 81 | /* The number of pages of host memory we allocate for CPM. This is |
100 | * done early in kernel initialization to get physically contiguous | 82 | * done early in kernel initialization to get physically contiguous |
101 | * pages. | 83 | * pages. |
@@ -107,17 +89,9 @@ | |||
107 | */ | 89 | */ |
108 | extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */ | 90 | extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */ |
109 | 91 | ||
110 | #ifdef CONFIG_PPC_CPM_NEW_BINDING | ||
111 | #define cpm_dpalloc cpm_muram_alloc | 92 | #define cpm_dpalloc cpm_muram_alloc |
112 | #define cpm_dpfree cpm_muram_free | 93 | #define cpm_dpfree cpm_muram_free |
113 | #define cpm_dpram_addr cpm_muram_addr | 94 | #define cpm_dpram_addr cpm_muram_addr |
114 | #else | ||
115 | extern unsigned long cpm_dpalloc(uint size, uint align); | ||
116 | extern int cpm_dpfree(unsigned long offset); | ||
117 | extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align); | ||
118 | extern void cpm_dpdump(void); | ||
119 | extern void *cpm_dpram_addr(unsigned long offset); | ||
120 | #endif | ||
121 | 95 | ||
122 | extern void cpm_setbrg(uint brg, uint rate); | 96 | extern void cpm_setbrg(uint brg, uint rate); |
123 | extern void cpm2_fastbrg(uint brg, uint rate, int div16); | 97 | extern void cpm2_fastbrg(uint brg, uint rate, int div16); |
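For reference, a sketch (not from the patch) of the allocator the legacy cpm_dpalloc() names now resolve to on both CPM1 and CPM2; cpm.h above also gains cpm_muram_offset() as the reverse of cpm_muram_addr(). BD_COUNT, alloc_bd_ring() and the IS_ERR_VALUE error convention are assumptions made for the example:

#include <linux/err.h>
#include <linux/types.h>
#include <asm/cpm.h>

#define BD_COUNT 8				/* invented ring size */

static cbd_t __iomem *alloc_bd_ring(dma_addr_t *dma)
{
	/* 8-byte-aligned buffer descriptors carved out of dual-port RAM */
	unsigned long off = cpm_muram_alloc(BD_COUNT * sizeof(cbd_t), 8);

	if (IS_ERR_VALUE(off))
		return NULL;

	*dma = cpm_muram_dma(cpm_muram_addr(off));	/* bus-side view */
	return cpm_muram_addr(off);			/* CPU-side view */
}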
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 1e79673b7316..2a3e9075a5a0 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -1,8 +1,6 @@ | |||
1 | #ifndef __ASM_POWERPC_CPUTABLE_H | 1 | #ifndef __ASM_POWERPC_CPUTABLE_H |
2 | #define __ASM_POWERPC_CPUTABLE_H | 2 | #define __ASM_POWERPC_CPUTABLE_H |
3 | 3 | ||
4 | #include <asm/asm-compat.h> | ||
5 | |||
6 | #define PPC_FEATURE_32 0x80000000 | 4 | #define PPC_FEATURE_32 0x80000000 |
7 | #define PPC_FEATURE_64 0x40000000 | 5 | #define PPC_FEATURE_64 0x40000000 |
8 | #define PPC_FEATURE_601_INSTR 0x20000000 | 6 | #define PPC_FEATURE_601_INSTR 0x20000000 |
@@ -26,11 +24,20 @@ | |||
26 | #define PPC_FEATURE_PA6T 0x00000800 | 24 | #define PPC_FEATURE_PA6T 0x00000800 |
27 | #define PPC_FEATURE_HAS_DFP 0x00000400 | 25 | #define PPC_FEATURE_HAS_DFP 0x00000400 |
28 | #define PPC_FEATURE_POWER6_EXT 0x00000200 | 26 | #define PPC_FEATURE_POWER6_EXT 0x00000200 |
27 | #define PPC_FEATURE_ARCH_2_06 0x00000100 | ||
28 | #define PPC_FEATURE_HAS_VSX 0x00000080 | ||
29 | |||
30 | #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ | ||
31 | 0x00000040 | ||
29 | 32 | ||
30 | #define PPC_FEATURE_TRUE_LE 0x00000002 | 33 | #define PPC_FEATURE_TRUE_LE 0x00000002 |
31 | #define PPC_FEATURE_PPC_LE 0x00000001 | 34 | #define PPC_FEATURE_PPC_LE 0x00000001 |
32 | 35 | ||
33 | #ifdef __KERNEL__ | 36 | #ifdef __KERNEL__ |
37 | |||
38 | #include <asm/asm-compat.h> | ||
39 | #include <asm/feature-fixups.h> | ||
40 | |||
34 | #ifndef __ASSEMBLY__ | 41 | #ifndef __ASSEMBLY__ |
35 | 42 | ||
36 | /* This structure can grow, it's real size is used by head.S code | 43 | /* This structure can grow, it's real size is used by head.S code |
@@ -132,7 +139,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
132 | #define CPU_FTR_TAU ASM_CONST(0x0000000000000010) | 139 | #define CPU_FTR_TAU ASM_CONST(0x0000000000000010) |
133 | #define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020) | 140 | #define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020) |
134 | #define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) | 141 | #define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) |
135 | #define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080) | 142 | #define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) |
136 | #define CPU_FTR_601 ASM_CONST(0x0000000000000100) | 143 | #define CPU_FTR_601 ASM_CONST(0x0000000000000100) |
137 | #define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200) | 144 | #define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200) |
138 | #define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) | 145 | #define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) |
@@ -152,6 +159,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
152 | #define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) | 159 | #define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) |
153 | #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) | 160 | #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) |
154 | #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) | 161 | #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) |
162 | #define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) | ||
155 | 163 | ||
156 | /* | 164 | /* |
157 | * Add the 64-bit processor unique features in the top half of the word; | 165 | * Add the 64-bit processor unique features in the top half of the word; |
@@ -180,6 +188,8 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
180 | #define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) | 188 | #define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) |
181 | #define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000) | 189 | #define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000) |
182 | #define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000) | 190 | #define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000) |
191 | #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) | ||
192 | #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) | ||
183 | 193 | ||
184 | #ifndef __ASSEMBLY__ | 194 | #ifndef __ASSEMBLY__ |
185 | 195 | ||
@@ -198,6 +208,17 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
198 | #define PPC_FEATURE_HAS_ALTIVEC_COMP 0 | 208 | #define PPC_FEATURE_HAS_ALTIVEC_COMP 0 |
199 | #endif | 209 | #endif |
200 | 210 | ||
211 | /* We only set the VSX features if the kernel was compiled with VSX | ||
212 | * support | ||
213 | */ | ||
214 | #ifdef CONFIG_VSX | ||
215 | #define CPU_FTR_VSX_COMP CPU_FTR_VSX | ||
216 | #define PPC_FEATURE_HAS_VSX_COMP PPC_FEATURE_HAS_VSX | ||
217 | #else | ||
218 | #define CPU_FTR_VSX_COMP 0 | ||
219 | #define PPC_FEATURE_HAS_VSX_COMP 0 | ||
220 | #endif | ||
221 | |||
201 | /* We only set the spe features if the kernel was compiled with spe | 222 | /* We only set the spe features if the kernel was compiled with spe |
202 | * support | 223 | * support |
203 | */ | 224 | */ |
@@ -245,8 +266,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
245 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | 266 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ |
246 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) | 267 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) |
247 | #define CPU_FTRS_604 (CPU_FTR_COMMON | \ | 268 | #define CPU_FTRS_604 (CPU_FTR_COMMON | \ |
248 | CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE | \ | 269 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE) |
249 | CPU_FTR_PPC_LE) | ||
250 | #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ | 270 | #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ |
251 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | 271 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
252 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) | 272 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) |
@@ -347,40 +367,50 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
347 | #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ | 367 | #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ |
348 | CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ | 368 | CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ |
349 | CPU_FTR_UNIFIED_ID_CACHE) | 369 | CPU_FTR_UNIFIED_ID_CACHE) |
350 | #define CPU_FTRS_E500 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ | 370 | #define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ |
371 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN) | ||
372 | #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | ||
373 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \ | ||
351 | CPU_FTR_NODSISRALIGN) | 374 | CPU_FTR_NODSISRALIGN) |
352 | #define CPU_FTRS_E500_2 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ | 375 | #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ |
353 | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN) | 376 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ |
377 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC) | ||
354 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) | 378 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) |
355 | 379 | ||
356 | /* 64-bit CPUs */ | 380 | /* 64-bit CPUs */ |
357 | #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \ | 381 | #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
358 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) | 382 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) |
359 | #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \ | 383 | #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
360 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ | 384 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ |
361 | CPU_FTR_MMCRA | CPU_FTR_CTRL) | 385 | CPU_FTR_MMCRA | CPU_FTR_CTRL) |
362 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | \ | 386 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
363 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 387 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
364 | CPU_FTR_MMCRA) | 388 | CPU_FTR_MMCRA) |
365 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | \ | 389 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
366 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 390 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
367 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) | 391 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) |
368 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | \ | 392 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
369 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 393 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
370 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 394 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
371 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | 395 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ |
372 | CPU_FTR_PURR) | 396 | CPU_FTR_PURR) |
373 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \ | 397 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
374 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 398 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
375 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 399 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
376 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | 400 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ |
377 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | 401 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ |
378 | CPU_FTR_DSCR) | 402 | CPU_FTR_DSCR) |
379 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | \ | 403 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
404 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | ||
405 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | ||
406 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | ||
407 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | ||
408 | CPU_FTR_DSCR | CPU_FTR_SAO) | ||
409 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | ||
380 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 410 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
381 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 411 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
382 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) | 412 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) |
383 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \ | 413 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
384 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ | 414 | CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ |
385 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ | 415 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ |
386 | CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) | 416 | CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) |
@@ -391,7 +421,8 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, | |||
391 | #define CPU_FTRS_POSSIBLE \ | 421 | #define CPU_FTRS_POSSIBLE \ |
392 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ | 422 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ |
393 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ | 423 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ |
394 | CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_1T_SEGMENT) | 424 | CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ |
425 | CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) | ||
395 | #else | 426 | #else |
396 | enum { | 427 | enum { |
397 | CPU_FTRS_POSSIBLE = | 428 | CPU_FTRS_POSSIBLE = |
@@ -421,7 +452,7 @@ enum { | |||
421 | CPU_FTRS_E200 | | 452 | CPU_FTRS_E200 | |
422 | #endif | 453 | #endif |
423 | #ifdef CONFIG_E500 | 454 | #ifdef CONFIG_E500 |
424 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | | 455 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | |
425 | #endif | 456 | #endif |
426 | 0, | 457 | 0, |
427 | }; | 458 | }; |
@@ -431,7 +462,7 @@ enum { | |||
431 | #define CPU_FTRS_ALWAYS \ | 462 | #define CPU_FTRS_ALWAYS \ |
432 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ | 463 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ |
433 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ | 464 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ |
434 | CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) | 465 | CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) |
435 | #else | 466 | #else |
436 | enum { | 467 | enum { |
437 | CPU_FTRS_ALWAYS = | 468 | CPU_FTRS_ALWAYS = |
@@ -461,7 +492,7 @@ enum { | |||
461 | CPU_FTRS_E200 & | 492 | CPU_FTRS_E200 & |
462 | #endif | 493 | #endif |
463 | #ifdef CONFIG_E500 | 494 | #ifdef CONFIG_E500 |
464 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & | 495 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & |
465 | #endif | 496 | #endif |
466 | CPU_FTRS_POSSIBLE, | 497 | CPU_FTRS_POSSIBLE, |
467 | }; | 498 | }; |
@@ -477,18 +508,5 @@ static inline int cpu_has_feature(unsigned long feature) | |||
477 | 508 | ||
478 | #endif /* !__ASSEMBLY__ */ | 509 | #endif /* !__ASSEMBLY__ */ |
479 | 510 | ||
480 | #ifdef __ASSEMBLY__ | ||
481 | |||
482 | #define BEGIN_FTR_SECTION_NESTED(label) label: | ||
483 | #define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) | ||
484 | #define END_FTR_SECTION_NESTED(msk, val, label) \ | ||
485 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) | ||
486 | #define END_FTR_SECTION(msk, val) \ | ||
487 | END_FTR_SECTION_NESTED(msk, val, 97) | ||
488 | |||
489 | #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) | ||
490 | #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) | ||
491 | #endif /* __ASSEMBLY__ */ | ||
492 | |||
493 | #endif /* __KERNEL__ */ | 511 | #endif /* __KERNEL__ */ |
494 | #endif /* __ASM_POWERPC_CPUTABLE_H */ | 512 | #endif /* __ASM_POWERPC_CPUTABLE_H */ |
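Runtime consumption sketch (the function and messages are invented, not part of the patch): kernel code tests the new bits with cpu_has_feature(), while userspace sees PPC_FEATURE_HAS_VSX through AT_HWCAP.

#include <linux/kernel.h>
#include <asm/cputable.h>

static void report_vector_units(void)
{
	if (cpu_has_feature(CPU_FTR_VSX_COMP))
		printk(KERN_INFO "CPU has VSX\n");
	else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
		printk(KERN_INFO "CPU has AltiVec only\n");
}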
diff --git a/include/asm-powerpc/dcr-generic.h b/include/asm-powerpc/dcr-generic.h
new file mode 100644
index 000000000000..35b71599ec46
--- /dev/null
+++ b/include/asm-powerpc/dcr-generic.h
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. | ||
3 | * <benh@kernel.crashing.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_POWERPC_DCR_GENERIC_H | ||
21 | #define _ASM_POWERPC_DCR_GENERIC_H | ||
22 | #ifdef __KERNEL__ | ||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID}; | ||
26 | |||
27 | typedef struct { | ||
28 | enum host_type_t type; | ||
29 | union { | ||
30 | dcr_host_mmio_t mmio; | ||
31 | dcr_host_native_t native; | ||
32 | } host; | ||
33 | } dcr_host_t; | ||
34 | |||
35 | extern bool dcr_map_ok_generic(dcr_host_t host); | ||
36 | |||
37 | extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n, | ||
38 | unsigned int dcr_c); | ||
39 | extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c); | ||
40 | |||
41 | extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n); | ||
42 | |||
43 | extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value); | ||
44 | |||
45 | #endif /* __ASSEMBLY__ */ | ||
46 | #endif /* __KERNEL__ */ | ||
47 | #endif /* _ASM_POWERPC_DCR_GENERIC_H */ | ||
48 | |||
49 | |||
diff --git a/include/asm-powerpc/dcr-mmio.h b/include/asm-powerpc/dcr-mmio.h
index 08532ff1899c..acd491dbd45a 100644
--- a/include/asm-powerpc/dcr-mmio.h
+++ b/include/asm-powerpc/dcr-mmio.h
@@ -27,20 +27,26 @@ typedef struct { | |||
27 | void __iomem *token; | 27 | void __iomem *token; |
28 | unsigned int stride; | 28 | unsigned int stride; |
29 | unsigned int base; | 29 | unsigned int base; |
30 | } dcr_host_t; | 30 | } dcr_host_mmio_t; |
31 | 31 | ||
32 | #define DCR_MAP_OK(host) ((host).token != NULL) | 32 | static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host) |
33 | { | ||
34 | return host.token != NULL; | ||
35 | } | ||
33 | 36 | ||
34 | extern dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n, | 37 | extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, |
35 | unsigned int dcr_c); | 38 | unsigned int dcr_n, |
36 | extern void dcr_unmap(dcr_host_t host, unsigned int dcr_c); | 39 | unsigned int dcr_c); |
40 | extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c); | ||
37 | 41 | ||
38 | static inline u32 dcr_read(dcr_host_t host, unsigned int dcr_n) | 42 | static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n) |
39 | { | 43 | { |
40 | return in_be32(host.token + ((host.base + dcr_n) * host.stride)); | 44 | return in_be32(host.token + ((host.base + dcr_n) * host.stride)); |
41 | } | 45 | } |
42 | 46 | ||
43 | static inline void dcr_write(dcr_host_t host, unsigned int dcr_n, u32 value) | 47 | static inline void dcr_write_mmio(dcr_host_mmio_t host, |
48 | unsigned int dcr_n, | ||
49 | u32 value) | ||
44 | { | 50 | { |
45 | out_be32(host.token + ((host.base + dcr_n) * host.stride), value); | 51 | out_be32(host.token + ((host.base + dcr_n) * host.stride), value); |
46 | } | 52 | } |
diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
index f8398ce80372..72d2b72c7390 100644
--- a/include/asm-powerpc/dcr-native.h
+++ b/include/asm-powerpc/dcr-native.h
@@ -26,14 +26,18 @@ | |||
26 | 26 | ||
27 | typedef struct { | 27 | typedef struct { |
28 | unsigned int base; | 28 | unsigned int base; |
29 | } dcr_host_t; | 29 | } dcr_host_native_t; |
30 | 30 | ||
31 | #define DCR_MAP_OK(host) (1) | 31 | static inline bool dcr_map_ok_native(dcr_host_native_t host) |
32 | { | ||
33 | return 1; | ||
34 | } | ||
32 | 35 | ||
33 | #define dcr_map(dev, dcr_n, dcr_c) ((dcr_host_t){ .base = (dcr_n) }) | 36 | #define dcr_map_native(dev, dcr_n, dcr_c) \ |
34 | #define dcr_unmap(host, dcr_c) do {} while (0) | 37 | ((dcr_host_native_t){ .base = (dcr_n) }) |
35 | #define dcr_read(host, dcr_n) mfdcr(dcr_n + host.base) | 38 | #define dcr_unmap_native(host, dcr_c) do {} while (0) |
36 | #define dcr_write(host, dcr_n, value) mtdcr(dcr_n + host.base, value) | 39 | #define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base) |
40 | #define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value) | ||
37 | 41 | ||
38 | /* Device Control Registers */ | 42 | /* Device Control Registers */ |
39 | void __mtdcr(int reg, unsigned int val); | 43 | void __mtdcr(int reg, unsigned int val); |
diff --git a/include/asm-powerpc/dcr.h b/include/asm-powerpc/dcr.h
index 9338d50538f1..53b283050ab3 100644
--- a/include/asm-powerpc/dcr.h
+++ b/include/asm-powerpc/dcr.h
@@ -20,14 +20,50 @@ | |||
20 | #ifndef _ASM_POWERPC_DCR_H | 20 | #ifndef _ASM_POWERPC_DCR_H |
21 | #define _ASM_POWERPC_DCR_H | 21 | #define _ASM_POWERPC_DCR_H |
22 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
23 | #ifndef __ASSEMBLY__ | ||
23 | #ifdef CONFIG_PPC_DCR | 24 | #ifdef CONFIG_PPC_DCR |
24 | 25 | ||
25 | #ifdef CONFIG_PPC_DCR_NATIVE | 26 | #ifdef CONFIG_PPC_DCR_NATIVE |
26 | #include <asm/dcr-native.h> | 27 | #include <asm/dcr-native.h> |
27 | #else | 28 | #endif |
29 | |||
30 | #ifdef CONFIG_PPC_DCR_MMIO | ||
28 | #include <asm/dcr-mmio.h> | 31 | #include <asm/dcr-mmio.h> |
29 | #endif | 32 | #endif |
30 | 33 | ||
34 | |||
35 | /* Indirection layer for providing both NATIVE and MMIO support. */ | ||
36 | |||
37 | #if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) | ||
38 | |||
39 | #include <asm/dcr-generic.h> | ||
40 | |||
41 | #define DCR_MAP_OK(host) dcr_map_ok_generic(host) | ||
42 | #define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c) | ||
43 | #define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c) | ||
44 | #define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n) | ||
45 | #define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value) | ||
46 | |||
47 | #else | ||
48 | |||
49 | #ifdef CONFIG_PPC_DCR_NATIVE | ||
50 | typedef dcr_host_native_t dcr_host_t; | ||
51 | #define DCR_MAP_OK(host) dcr_map_ok_native(host) | ||
52 | #define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c) | ||
53 | #define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c) | ||
54 | #define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n) | ||
55 | #define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value) | ||
56 | #else | ||
57 | typedef dcr_host_mmio_t dcr_host_t; | ||
58 | #define DCR_MAP_OK(host) dcr_map_ok_mmio(host) | ||
59 | #define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c) | ||
60 | #define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c) | ||
61 | #define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n) | ||
62 | #define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value) | ||
63 | #endif | ||
64 | |||
65 | #endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ | ||
66 | |||
31 | /* | 67 | /* |
32 | * On CONFIG_PPC_MERGE, we have additional helpers to read the DCR | 68 | * On CONFIG_PPC_MERGE, we have additional helpers to read the DCR |
33 | * base from the device-tree | 69 | * base from the device-tree |
@@ -41,5 +77,6 @@ extern unsigned int dcr_resource_len(struct device_node *np, | |||
41 | #endif /* CONFIG_PPC_MERGE */ | 77 | #endif /* CONFIG_PPC_MERGE */ |
42 | 78 | ||
43 | #endif /* CONFIG_PPC_DCR */ | 79 | #endif /* CONFIG_PPC_DCR */ |
80 | #endif /* __ASSEMBLY__ */ | ||
44 | #endif /* __KERNEL__ */ | 81 | #endif /* __KERNEL__ */ |
45 | #endif /* _ASM_POWERPC_DCR_H */ | 82 | #endif /* _ASM_POWERPC_DCR_H */ |
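Consumer sketch for the indirection layer (my_dcr_probe() and the register usage are invented; assumes a CONFIG_PPC_MERGE platform where dcr_resource_start()/dcr_resource_len() are available). The point of the change is that the same source now builds whether the platform selects the native back-end, the MMIO back-end, or both:

#include <linux/errno.h>
#include <asm/prom.h>
#include <asm/dcr.h>

static int my_dcr_probe(struct device_node *np)
{
	unsigned int base = dcr_resource_start(np, 0);
	unsigned int len = dcr_resource_len(np, 0);
	dcr_host_t host;
	u32 status;

	host = dcr_map(np, base, len);
	if (!DCR_MAP_OK(host))
		return -ENODEV;

	status = dcr_read(host, 0);		/* first DCR of the block */
	dcr_write(host, 0, status | 1);		/* set an invented enable bit */

	dcr_unmap(host, len);
	return 0;
}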
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index bbefb69bfb67..74c549780987 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -13,6 +13,7 @@ | |||
13 | /* need struct page definitions */ | 13 | /* need struct page definitions */ |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/scatterlist.h> | 15 | #include <linux/scatterlist.h> |
16 | #include <linux/dma-attrs.h> | ||
16 | #include <asm/io.h> | 17 | #include <asm/io.h> |
17 | 18 | ||
18 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 19 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
@@ -44,6 +45,15 @@ extern void __dma_sync_page(struct page *page, unsigned long offset, | |||
44 | #endif /* ! CONFIG_NOT_COHERENT_CACHE */ | 45 | #endif /* ! CONFIG_NOT_COHERENT_CACHE */ |
45 | 46 | ||
46 | #ifdef CONFIG_PPC64 | 47 | #ifdef CONFIG_PPC64 |
48 | |||
49 | static inline unsigned long device_to_mask(struct device *dev) | ||
50 | { | ||
51 | if (dev->dma_mask && *dev->dma_mask) | ||
52 | return *dev->dma_mask; | ||
53 | /* Assume devices without mask can take 32 bit addresses */ | ||
54 | return 0xfffffffful; | ||
55 | } | ||
56 | |||
47 | /* | 57 | /* |
48 | * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO | 58 | * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO |
49 | */ | 59 | */ |
@@ -53,13 +63,17 @@ struct dma_mapping_ops { | |||
53 | void (*free_coherent)(struct device *dev, size_t size, | 63 | void (*free_coherent)(struct device *dev, size_t size, |
54 | void *vaddr, dma_addr_t dma_handle); | 64 | void *vaddr, dma_addr_t dma_handle); |
55 | dma_addr_t (*map_single)(struct device *dev, void *ptr, | 65 | dma_addr_t (*map_single)(struct device *dev, void *ptr, |
56 | size_t size, enum dma_data_direction direction); | 66 | size_t size, enum dma_data_direction direction, |
67 | struct dma_attrs *attrs); | ||
57 | void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, | 68 | void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, |
58 | size_t size, enum dma_data_direction direction); | 69 | size_t size, enum dma_data_direction direction, |
70 | struct dma_attrs *attrs); | ||
59 | int (*map_sg)(struct device *dev, struct scatterlist *sg, | 71 | int (*map_sg)(struct device *dev, struct scatterlist *sg, |
60 | int nents, enum dma_data_direction direction); | 72 | int nents, enum dma_data_direction direction, |
73 | struct dma_attrs *attrs); | ||
61 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | 74 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, |
62 | int nents, enum dma_data_direction direction); | 75 | int nents, enum dma_data_direction direction, |
76 | struct dma_attrs *attrs); | ||
63 | int (*dma_supported)(struct device *dev, u64 mask); | 77 | int (*dma_supported)(struct device *dev, u64 mask); |
64 | int (*set_dma_mask)(struct device *dev, u64 dma_mask); | 78 | int (*set_dma_mask)(struct device *dev, u64 dma_mask); |
65 | }; | 79 | }; |
@@ -109,6 +123,77 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) | |||
109 | return 0; | 123 | return 0; |
110 | } | 124 | } |
111 | 125 | ||
126 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, | ||
127 | void *cpu_addr, | ||
128 | size_t size, | ||
129 | enum dma_data_direction direction, | ||
130 | struct dma_attrs *attrs) | ||
131 | { | ||
132 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
133 | |||
134 | BUG_ON(!dma_ops); | ||
135 | return dma_ops->map_single(dev, cpu_addr, size, direction, attrs); | ||
136 | } | ||
137 | |||
138 | static inline void dma_unmap_single_attrs(struct device *dev, | ||
139 | dma_addr_t dma_addr, | ||
140 | size_t size, | ||
141 | enum dma_data_direction direction, | ||
142 | struct dma_attrs *attrs) | ||
143 | { | ||
144 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
145 | |||
146 | BUG_ON(!dma_ops); | ||
147 | dma_ops->unmap_single(dev, dma_addr, size, direction, attrs); | ||
148 | } | ||
149 | |||
150 | static inline dma_addr_t dma_map_page_attrs(struct device *dev, | ||
151 | struct page *page, | ||
152 | unsigned long offset, size_t size, | ||
153 | enum dma_data_direction direction, | ||
154 | struct dma_attrs *attrs) | ||
155 | { | ||
156 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
157 | |||
158 | BUG_ON(!dma_ops); | ||
159 | return dma_ops->map_single(dev, page_address(page) + offset, size, | ||
160 | direction, attrs); | ||
161 | } | ||
162 | |||
163 | static inline void dma_unmap_page_attrs(struct device *dev, | ||
164 | dma_addr_t dma_address, | ||
165 | size_t size, | ||
166 | enum dma_data_direction direction, | ||
167 | struct dma_attrs *attrs) | ||
168 | { | ||
169 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
170 | |||
171 | BUG_ON(!dma_ops); | ||
172 | dma_ops->unmap_single(dev, dma_address, size, direction, attrs); | ||
173 | } | ||
174 | |||
175 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, | ||
176 | int nents, enum dma_data_direction direction, | ||
177 | struct dma_attrs *attrs) | ||
178 | { | ||
179 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
180 | |||
181 | BUG_ON(!dma_ops); | ||
182 | return dma_ops->map_sg(dev, sg, nents, direction, attrs); | ||
183 | } | ||
184 | |||
185 | static inline void dma_unmap_sg_attrs(struct device *dev, | ||
186 | struct scatterlist *sg, | ||
187 | int nhwentries, | ||
188 | enum dma_data_direction direction, | ||
189 | struct dma_attrs *attrs) | ||
190 | { | ||
191 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||
192 | |||
193 | BUG_ON(!dma_ops); | ||
194 | dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs); | ||
195 | } | ||
196 | |||
112 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 197 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
113 | dma_addr_t *dma_handle, gfp_t flag) | 198 | dma_addr_t *dma_handle, gfp_t flag) |
114 | { | 199 | { |
@@ -131,63 +216,43 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | |||
131 | size_t size, | 216 | size_t size, |
132 | enum dma_data_direction direction) | 217 | enum dma_data_direction direction) |
133 | { | 218 | { |
134 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 219 | return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL); |
135 | |||
136 | BUG_ON(!dma_ops); | ||
137 | return dma_ops->map_single(dev, cpu_addr, size, direction); | ||
138 | } | 220 | } |
139 | 221 | ||
140 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | 222 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, |
141 | size_t size, | 223 | size_t size, |
142 | enum dma_data_direction direction) | 224 | enum dma_data_direction direction) |
143 | { | 225 | { |
144 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 226 | dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL); |
145 | |||
146 | BUG_ON(!dma_ops); | ||
147 | dma_ops->unmap_single(dev, dma_addr, size, direction); | ||
148 | } | 227 | } |
149 | 228 | ||
150 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | 229 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, |
151 | unsigned long offset, size_t size, | 230 | unsigned long offset, size_t size, |
152 | enum dma_data_direction direction) | 231 | enum dma_data_direction direction) |
153 | { | 232 | { |
154 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 233 | return dma_map_page_attrs(dev, page, offset, size, direction, NULL); |
155 | |||
156 | BUG_ON(!dma_ops); | ||
157 | return dma_ops->map_single(dev, page_address(page) + offset, size, | ||
158 | direction); | ||
159 | } | 234 | } |
160 | 235 | ||
161 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 236 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
162 | size_t size, | 237 | size_t size, |
163 | enum dma_data_direction direction) | 238 | enum dma_data_direction direction) |
164 | { | 239 | { |
165 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 240 | dma_unmap_page_attrs(dev, dma_address, size, direction, NULL); |
166 | |||
167 | BUG_ON(!dma_ops); | ||
168 | dma_ops->unmap_single(dev, dma_address, size, direction); | ||
169 | } | 241 | } |
170 | 242 | ||
171 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | 243 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, |
172 | int nents, enum dma_data_direction direction) | 244 | int nents, enum dma_data_direction direction) |
173 | { | 245 | { |
174 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 246 | return dma_map_sg_attrs(dev, sg, nents, direction, NULL); |
175 | |||
176 | BUG_ON(!dma_ops); | ||
177 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
178 | } | 247 | } |
179 | 248 | ||
180 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 249 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
181 | int nhwentries, | 250 | int nhwentries, |
182 | enum dma_data_direction direction) | 251 | enum dma_data_direction direction) |
183 | { | 252 | { |
184 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 253 | dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL); |
185 | |||
186 | BUG_ON(!dma_ops); | ||
187 | dma_ops->unmap_sg(dev, sg, nhwentries, direction); | ||
188 | } | 254 | } |
189 | 255 | ||
190 | |||
191 | /* | 256 | /* |
192 | * Available generic sets of operations | 257 | * Available generic sets of operations |
193 | */ | 258 | */ |
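Sketch of what the new *_attrs entry points let a driver express (map_with_weak_ordering() is invented; DMA_ATTR_WEAK_ORDERING is one of the generic attributes from linux/dma-attrs.h, which a platform's dma_mapping_ops may honour or ignore):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static dma_addr_t map_with_weak_ordering(struct device *dev, void *buf,
					 size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Ask for relaxed ordering on this mapping, if the platform supports it */
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}

The plain dma_map_single() and friends keep their old behaviour by passing a NULL attrs pointer through the same ops.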
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 9080d85cb9d0..89664675b469 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -109,6 +109,7 @@ typedef elf_gregset_t32 compat_elf_gregset_t; | |||
109 | #ifdef __powerpc64__ | 109 | #ifdef __powerpc64__ |
110 | # define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */ | 110 | # define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */ |
111 | # define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */ | 111 | # define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */ |
112 | # define ELF_NVSRHALFREG 32 /* Half the vsx registers */ | ||
112 | # define ELF_GREG_TYPE elf_greg_t64 | 113 | # define ELF_GREG_TYPE elf_greg_t64 |
113 | #else | 114 | #else |
114 | # define ELF_NEVRREG 34 /* includes acc (as 2) */ | 115 | # define ELF_NEVRREG 34 /* includes acc (as 2) */ |
@@ -158,6 +159,7 @@ typedef __vector128 elf_vrreg_t; | |||
158 | typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG]; | 159 | typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG]; |
159 | #ifdef __powerpc64__ | 160 | #ifdef __powerpc64__ |
160 | typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; | 161 | typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; |
162 | typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; | ||
161 | #endif | 163 | #endif |
162 | 164 | ||
163 | #ifdef __KERNEL__ | 165 | #ifdef __KERNEL__ |
@@ -202,30 +204,8 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs, | |||
202 | } | 204 | } |
203 | #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs); | 205 | #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs); |
204 | 206 | ||
205 | static inline int dump_task_regs(struct task_struct *tsk, | ||
206 | elf_gregset_t *elf_regs) | ||
207 | { | ||
208 | struct pt_regs *regs = tsk->thread.regs; | ||
209 | if (regs) | ||
210 | ppc_elf_core_copy_regs(*elf_regs, regs); | ||
211 | |||
212 | return 1; | ||
213 | } | ||
214 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
215 | |||
216 | extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); | ||
217 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | ||
218 | |||
219 | typedef elf_vrregset_t elf_fpxregset_t; | 207 | typedef elf_vrregset_t elf_fpxregset_t; |
220 | 208 | ||
221 | #ifdef CONFIG_ALTIVEC | ||
222 | extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs); | ||
223 | #define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_altivec(tsk, regs) | ||
224 | #define ELF_CORE_XFPREG_TYPE NT_PPC_VMX | ||
225 | #endif | ||
226 | |||
227 | #endif /* __KERNEL__ */ | ||
228 | |||
229 | /* ELF_HWCAP yields a mask that user programs can use to figure out what | 209 | /* ELF_HWCAP yields a mask that user programs can use to figure out what |
230 | instruction set this cpu supports. This could be done in userspace, | 210 | instruction set this cpu supports. This could be done in userspace, |
231 | but it's not easy, and we've already done it here. */ | 211 | but it's not easy, and we've already done it here. */ |
@@ -243,8 +223,6 @@ extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs); | |||
243 | } while (0) | 223 | } while (0) |
244 | #endif /* __powerpc64__ */ | 224 | #endif /* __powerpc64__ */ |
245 | 225 | ||
246 | #ifdef __KERNEL__ | ||
247 | |||
248 | #ifdef __powerpc64__ | 226 | #ifdef __powerpc64__ |
249 | # define SET_PERSONALITY(ex, ibcs2) \ | 227 | # define SET_PERSONALITY(ex, ibcs2) \ |
250 | do { \ | 228 | do { \ |
@@ -257,7 +235,8 @@ do { \ | |||
257 | else \ | 235 | else \ |
258 | clear_thread_flag(TIF_ABI_PENDING); \ | 236 | clear_thread_flag(TIF_ABI_PENDING); \ |
259 | if (personality(current->personality) != PER_LINUX32) \ | 237 | if (personality(current->personality) != PER_LINUX32) \ |
260 | set_personality(PER_LINUX); \ | 238 | set_personality(PER_LINUX | \ |
239 | (current->personality & (~PER_MASK))); \ | ||
261 | } while (0) | 240 | } while (0) |
262 | /* | 241 | /* |
263 | * An executable for which elf_read_implies_exec() returns TRUE will | 242 | * An executable for which elf_read_implies_exec() returns TRUE will |
@@ -272,8 +251,6 @@ do { \ | |||
272 | # define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) | 251 | # define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) |
273 | #endif /* __powerpc64__ */ | 252 | #endif /* __powerpc64__ */ |
274 | 253 | ||
275 | #endif /* __KERNEL__ */ | ||
276 | |||
277 | extern int dcache_bsize; | 254 | extern int dcache_bsize; |
278 | extern int icache_bsize; | 255 | extern int icache_bsize; |
279 | extern int ucache_bsize; | 256 | extern int ucache_bsize; |
@@ -285,6 +262,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |||
285 | int executable_stack); | 262 | int executable_stack); |
286 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); | 263 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); |
287 | 264 | ||
265 | #endif /* __KERNEL__ */ | ||
266 | |||
288 | /* | 267 | /* |
289 | * The requirements here are: | 268 | * The requirements here are: |
290 | * - keep the final alignment of sp (sp & 0xf) | 269 | * - keep the final alignment of sp (sp & 0xf) |
@@ -422,6 +401,8 @@ do { \ | |||
422 | /* Keep this the last entry. */ | 401 | /* Keep this the last entry. */ |
423 | #define R_PPC64_NUM 107 | 402 | #define R_PPC64_NUM 107 |
424 | 403 | ||
404 | #ifdef __KERNEL__ | ||
405 | |||
425 | #ifdef CONFIG_SPU_BASE | 406 | #ifdef CONFIG_SPU_BASE |
426 | /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ | 407 | /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ |
427 | #define NT_SPU 1 | 408 | #define NT_SPU 1 |
@@ -430,4 +411,6 @@ do { \ | |||
430 | 411 | ||
431 | #endif /* CONFIG_SPU_BASE */ | 412 | #endif /* CONFIG_SPU_BASE */ |
432 | 413 | ||
414 | #endif /* __KERNEL */ | ||
415 | |||
433 | #endif /* _ASM_POWERPC_ELF_H */ | 416 | #endif /* _ASM_POWERPC_ELF_H */ |
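Why the extra mask in SET_PERSONALITY matters (illustration only; keep_flags() is invented): without it, the flag bits above PER_MASK, such as ADDR_NO_RANDOMIZE, were dropped when a 64-bit binary was exec'd.

#include <linux/personality.h>

static unsigned long keep_flags(unsigned long old_personality)
{
	/* e.g. old_personality == PER_LINUX32 | ADDR_NO_RANDOMIZE */
	return PER_LINUX | (old_personality & ~PER_MASK);
	/* base personality becomes PER_LINUX, ADDR_NO_RANDOMIZE survives */
}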
diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h
new file mode 100644
index 000000000000..a1029967620b
--- /dev/null
+++ b/include/asm-powerpc/feature-fixups.h
@@ -0,0 +1,126 @@ | |||
1 | #ifndef __ASM_POWERPC_FEATURE_FIXUPS_H | ||
2 | #define __ASM_POWERPC_FEATURE_FIXUPS_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifdef __ASSEMBLY__ | ||
12 | |||
13 | /* | ||
14 | * Feature section common macros | ||
15 | * | ||
16 | * Note that the entries now contain offsets between the table entry | ||
17 | * and the code rather than absolute code pointers in order to be | ||
18 | * useable with the vdso shared library. There is also an assumption | ||
19 | * that values will be negative, that is, the fixup table has to be | ||
20 | * located after the code it fixes up. | ||
21 | */ | ||
22 | #if defined(CONFIG_PPC64) && !defined(__powerpc64__) | ||
23 | /* 64 bits kernel, 32 bits code (ie. vdso32) */ | ||
24 | #define FTR_ENTRY_LONG .llong | ||
25 | #define FTR_ENTRY_OFFSET .long 0xffffffff; .long | ||
26 | #else | ||
27 | /* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */ | ||
28 | #define FTR_ENTRY_LONG PPC_LONG | ||
29 | #define FTR_ENTRY_OFFSET PPC_LONG | ||
30 | #endif | ||
31 | |||
32 | #define START_FTR_SECTION(label) label##1: | ||
33 | |||
34 | #define FTR_SECTION_ELSE_NESTED(label) \ | ||
35 | label##2: \ | ||
36 | .pushsection __ftr_alt_##label,"a"; \ | ||
37 | .align 2; \ | ||
38 | label##3: | ||
39 | |||
40 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ | ||
41 | label##4: \ | ||
42 | .popsection; \ | ||
43 | .pushsection sect,"a"; \ | ||
44 | .align 3; \ | ||
45 | label##5: \ | ||
46 | FTR_ENTRY_LONG msk; \ | ||
47 | FTR_ENTRY_LONG val; \ | ||
48 | FTR_ENTRY_OFFSET label##1b-label##5b; \ | ||
49 | FTR_ENTRY_OFFSET label##2b-label##5b; \ | ||
50 | FTR_ENTRY_OFFSET label##3b-label##5b; \ | ||
51 | FTR_ENTRY_OFFSET label##4b-label##5b; \ | ||
52 | .popsection; | ||
53 | |||
54 | |||
55 | /* CPU feature dependent sections */ | ||
56 | #define BEGIN_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) | ||
57 | #define BEGIN_FTR_SECTION START_FTR_SECTION(97) | ||
58 | |||
59 | #define END_FTR_SECTION_NESTED(msk, val, label) \ | ||
60 | FTR_SECTION_ELSE_NESTED(label) \ | ||
61 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) | ||
62 | |||
63 | #define END_FTR_SECTION(msk, val) \ | ||
64 | END_FTR_SECTION_NESTED(msk, val, 97) | ||
65 | |||
66 | #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) | ||
67 | #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) | ||
68 | |||
69 | /* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ | ||
70 | #define FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) | ||
71 | #define ALT_FTR_SECTION_END_NESTED(msk, val, label) \ | ||
72 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) | ||
73 | #define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label) \ | ||
74 | ALT_FTR_SECTION_END_NESTED(msk, msk, label) | ||
75 | #define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ | ||
76 | ALT_FTR_SECTION_END_NESTED(msk, 0, label) | ||
77 | #define ALT_FTR_SECTION_END(msk, val) \ | ||
78 | ALT_FTR_SECTION_END_NESTED(msk, val, 97) | ||
79 | #define ALT_FTR_SECTION_END_IFSET(msk) \ | ||
80 | ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97) | ||
81 | #define ALT_FTR_SECTION_END_IFCLR(msk) \ | ||
82 | ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) | ||
83 | |||
84 | /* Firmware feature dependent sections */ | ||
85 | #define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) | ||
86 | #define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) | ||
87 | |||
88 | #define END_FW_FTR_SECTION_NESTED(msk, val, label) \ | ||
89 | FTR_SECTION_ELSE_NESTED(label) \ | ||
90 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) | ||
91 | |||
92 | #define END_FW_FTR_SECTION(msk, val) \ | ||
93 | END_FW_FTR_SECTION_NESTED(msk, val, 97) | ||
94 | |||
95 | #define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) | ||
96 | #define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) | ||
97 | |||
98 | /* Firmware feature sections with alternatives */ | ||
99 | #define FW_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) | ||
100 | #define FW_FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) | ||
101 | #define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label) \ | ||
102 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) | ||
103 | #define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label) \ | ||
104 | ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label) | ||
105 | #define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ | ||
106 | ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label) | ||
107 | #define ALT_FW_FTR_SECTION_END(msk, val) \ | ||
108 | ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97) | ||
109 | #define ALT_FW_FTR_SECTION_END_IFSET(msk) \ | ||
110 | ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97) | ||
111 | #define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ | ||
112 | ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) | ||
113 | |||
114 | #endif /* __ASSEMBLY__ */ | ||
115 | |||
116 | /* LWSYNC feature sections */ | ||
117 | #define START_LWSYNC_SECTION(label) label##1: | ||
118 | #define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \ | ||
119 | label##2: \ | ||
120 | .pushsection sect,"a"; \ | ||
121 | .align 2; \ | ||
122 | label##3: \ | ||
123 | .long label##1b-label##3b; \ | ||
124 | .popsection; | ||
125 | |||
126 | #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ | ||
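For readers of the entry layout: each MAKE_FTR_SECTION_ENTRY above emits six words that the boot-time fixup code walks. On a 64-bit kernel they can be read as roughly the C structure below (shown only as an aid; the field names mirror the kernel's fixup code). When (features & mask) != value, the code between start_off and end_off is overwritten with the alternative section, padded with nops.

struct fixup_entry {
	unsigned long	mask;		/* FTR_ENTRY_LONG msk */
	unsigned long	value;		/* FTR_ENTRY_LONG val */
	long		start_off;	/* label##1b - label##5b: guarded code start */
	long		end_off;	/* label##2b - label##5b: guarded code end */
	long		alt_start_off;	/* label##3b - label##5b: alternative start */
	long		alt_end_off;	/* label##4b - label##5b: alternative end */
};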
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 1e41bd1c8502..ef328995ba9d 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -15,6 +15,7 @@ | |||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | #include <asm/asm-compat.h> | 17 | #include <asm/asm-compat.h> |
18 | #include <asm/feature-fixups.h> | ||
18 | 19 | ||
19 | /* firmware feature bitmask values */ | 20 | /* firmware feature bitmask values */ |
20 | #define FIRMWARE_MAX_FEATURES 63 | 21 | #define FIRMWARE_MAX_FEATURES 63 |
@@ -125,18 +126,6 @@ extern int fwnmi_active; | |||
125 | 126 | ||
126 | extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; | 127 | extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; |
127 | 128 | ||
128 | #else /* __ASSEMBLY__ */ | ||
129 | |||
130 | #define BEGIN_FW_FTR_SECTION_NESTED(label) label: | ||
131 | #define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) | ||
132 | #define END_FW_FTR_SECTION_NESTED(msk, val, label) \ | ||
133 | MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) | ||
134 | #define END_FW_FTR_SECTION(msk, val) \ | ||
135 | END_FW_FTR_SECTION_NESTED(msk, val, 97) | ||
136 | |||
137 | #define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) | ||
138 | #define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) | ||
139 | |||
140 | #endif /* __ASSEMBLY__ */ | 129 | #endif /* __ASSEMBLY__ */ |
141 | #endif /* __KERNEL__ */ | 130 | #endif /* __KERNEL__ */ |
142 | #endif /* __ASM_POWERPC_FIRMWARE_H */ | 131 | #endif /* __ASM_POWERPC_FIRMWARE_H */ |
diff --git a/include/asm-powerpc/fsl_gtm.h b/include/asm-powerpc/fsl_gtm.h new file mode 100644 index 000000000000..8e8c9b5032d3 --- /dev/null +++ b/include/asm-powerpc/fsl_gtm.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Freescale General-purpose Timers Module | ||
3 | * | ||
4 | * Copyright (c) Freescale Semiconductor, Inc. 2006. | ||

5 | * Shlomi Gridish <gridish@freescale.com> | ||
6 | * Jerry Huang <Chang-Ming.Huang@freescale.com> | ||
7 | * Copyright (c) MontaVista Software, Inc. 2008. | ||
8 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef __ASM_FSL_GTM_H | ||
17 | #define __ASM_FSL_GTM_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | struct gtm; | ||
22 | |||
23 | struct gtm_timer { | ||
24 | unsigned int irq; | ||
25 | |||
26 | struct gtm *gtm; | ||
27 | bool requested; | ||
28 | u8 __iomem *gtcfr; | ||
29 | __be16 __iomem *gtmdr; | ||
30 | __be16 __iomem *gtpsr; | ||
31 | __be16 __iomem *gtcnr; | ||
32 | __be16 __iomem *gtrfr; | ||
33 | __be16 __iomem *gtevr; | ||
34 | }; | ||
35 | |||
36 | extern struct gtm_timer *gtm_get_timer16(void); | ||
37 | extern struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm, | ||
38 | unsigned int timer); | ||
39 | extern void gtm_put_timer16(struct gtm_timer *tmr); | ||
40 | extern int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, | ||
41 | bool reload); | ||
42 | extern int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec, | ||
43 | bool reload); | ||
44 | extern void gtm_stop_timer16(struct gtm_timer *tmr); | ||
45 | extern void gtm_ack_timer16(struct gtm_timer *tmr, u16 events); | ||
46 | |||
47 | #endif /* __ASM_FSL_GTM_H */ | ||
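A hedged usage sketch of the GTM API declared above. The interrupt handler, the 1000 us period, the event mask, and the assumption that gtm_get_timer16() returns NULL on failure are illustrative, not taken from an in-tree driver.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/fsl_gtm.h>

static irqreturn_t example_gtm_isr(int irq, void *data)
{
	struct gtm_timer *tmr = data;

	gtm_ack_timer16(tmr, 0xffff);	/* ack all pending events (assumed mask) */
	return IRQ_HANDLED;
}

static int example_gtm_start(void)
{
	struct gtm_timer *tmr;
	int ret;

	tmr = gtm_get_timer16();
	if (!tmr)			/* failure convention is an assumption */
		return -ENODEV;

	ret = request_irq(tmr->irq, example_gtm_isr, 0, "example-gtm", tmr);
	if (ret) {
		gtm_put_timer16(tmr);
		return ret;
	}

	/* fire every 1000 us, auto-reloading */
	return gtm_set_timer16(tmr, 1000, true);
}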
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h index 89189488e286..8b627823f5f9 100644 --- a/include/asm-powerpc/io.h +++ b/include/asm-powerpc/io.h | |||
@@ -95,33 +95,60 @@ extern resource_size_t isa_mem_base; | |||
95 | #define IO_SET_SYNC_FLAG() | 95 | #define IO_SET_SYNC_FLAG() |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | #define DEF_MMIO_IN(name, type, insn) \ | 98 | /* gcc 4.0 and older doesn't have 'Z' constraint */ |
99 | static inline type name(const volatile type __iomem *addr) \ | 99 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0) |
100 | #define DEF_MMIO_IN_LE(name, size, insn) \ | ||
101 | static inline u##size name(const volatile u##size __iomem *addr) \ | ||
100 | { \ | 102 | { \ |
101 | type ret; \ | 103 | u##size ret; \ |
102 | __asm__ __volatile__("sync;" insn ";twi 0,%0,0;isync" \ | 104 | __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \ |
103 | : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \ | 105 | : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \ |
104 | return ret; \ | 106 | return ret; \ |
105 | } | 107 | } |
106 | 108 | ||
107 | #define DEF_MMIO_OUT(name, type, insn) \ | 109 | #define DEF_MMIO_OUT_LE(name, size, insn) \ |
108 | static inline void name(volatile type __iomem *addr, type val) \ | 110 | static inline void name(volatile u##size __iomem *addr, u##size val) \ |
109 | { \ | 111 | { \ |
110 | __asm__ __volatile__("sync;" insn \ | 112 | __asm__ __volatile__("sync;"#insn" %1,0,%2" \ |
111 | : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \ | 113 | : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \ |
112 | IO_SET_SYNC_FLAG(); \ | 114 | IO_SET_SYNC_FLAG(); \ |
113 | } | 115 | } |
116 | #else /* newer gcc */ | ||
117 | #define DEF_MMIO_IN_LE(name, size, insn) \ | ||
118 | static inline u##size name(const volatile u##size __iomem *addr) \ | ||
119 | { \ | ||
120 | u##size ret; \ | ||
121 | __asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync" \ | ||
122 | : "=r" (ret) : "Z" (*addr) : "memory"); \ | ||
123 | return ret; \ | ||
124 | } | ||
125 | |||
126 | #define DEF_MMIO_OUT_LE(name, size, insn) \ | ||
127 | static inline void name(volatile u##size __iomem *addr, u##size val) \ | ||
128 | { \ | ||
129 | __asm__ __volatile__("sync;"#insn" %1,%y0" \ | ||
130 | : "=Z" (*addr) : "r" (val) : "memory"); \ | ||
131 | IO_SET_SYNC_FLAG(); \ | ||
132 | } | ||
133 | #endif | ||
114 | 134 | ||
135 | #define DEF_MMIO_IN_BE(name, size, insn) \ | ||
136 | static inline u##size name(const volatile u##size __iomem *addr) \ | ||
137 | { \ | ||
138 | u##size ret; \ | ||
139 | __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ | ||
140 | : "=r" (ret) : "m" (*addr) : "memory"); \ | ||
141 | return ret; \ | ||
142 | } | ||
115 | 143 | ||
116 | #define DEF_MMIO_IN_BE(name, size, insn) \ | 144 | #define DEF_MMIO_OUT_BE(name, size, insn) \ |
117 | DEF_MMIO_IN(name, u##size, __stringify(insn)"%U2%X2 %0,%2") | 145 | static inline void name(volatile u##size __iomem *addr, u##size val) \ |
118 | #define DEF_MMIO_IN_LE(name, size, insn) \ | 146 | { \ |
119 | DEF_MMIO_IN(name, u##size, __stringify(insn)" %0,0,%1") | 147 | __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ |
148 | : "=m" (*addr) : "r" (val) : "memory"); \ | ||
149 | IO_SET_SYNC_FLAG(); \ | ||
150 | } | ||
120 | 151 | ||
121 | #define DEF_MMIO_OUT_BE(name, size, insn) \ | ||
122 | DEF_MMIO_OUT(name, u##size, __stringify(insn)"%U0%X0 %1,%0") | ||
123 | #define DEF_MMIO_OUT_LE(name, size, insn) \ | ||
124 | DEF_MMIO_OUT(name, u##size, __stringify(insn)" %1,0,%2") | ||
125 | 152 | ||
126 | DEF_MMIO_IN_BE(in_8, 8, lbz); | 153 | DEF_MMIO_IN_BE(in_8, 8, lbz); |
127 | DEF_MMIO_IN_BE(in_be16, 16, lhz); | 154 | DEF_MMIO_IN_BE(in_be16, 16, lhz); |
@@ -745,7 +772,7 @@ static inline void * bus_to_virt(unsigned long address) | |||
745 | #define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) | 772 | #define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) |
746 | 773 | ||
747 | #define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set) | 774 | #define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set) |
748 | #define clrsetbits_le16(addr, clear, set) clrsetbits(le32, addr, clear, set) | 775 | #define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set) |
749 | 776 | ||
750 | #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) | 777 | #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) |
751 | 778 | ||
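The rewritten DEF_MMIO_* macros still generate the familiar in_/out_ accessors (in_8(), in_be16(), and so on); only the inline asm they expand to changes with the compiler version. A small usage sketch, assuming a hypothetical device base and that the matching out_be16() accessor is generated alongside the in_ variants shown here:

#include <asm/io.h>

/* Register offsets are assumptions for illustration. */
#define DEV_CTRL	0x0
#define DEV_STATUS	0x4

static u16 dev_read_status(void __iomem *base)
{
	/* expands to sync; load; twi 0,...; isync around the access */
	return in_be16(base + DEV_STATUS);
}

static void dev_write_ctrl(void __iomem *base, u16 val)
{
	/* expands to sync; store before the access */
	out_be16(base + DEV_CTRL, val);
}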
diff --git a/include/asm-powerpc/ioctl.h b/include/asm-powerpc/ioctl.h index 8eb99848c402..57d68304218b 100644 --- a/include/asm-powerpc/ioctl.h +++ b/include/asm-powerpc/ioctl.h | |||
@@ -1,69 +1,13 @@ | |||
1 | #ifndef _ASM_POWERPC_IOCTL_H | 1 | #ifndef _ASM_POWERPC_IOCTL_H |
2 | #define _ASM_POWERPC_IOCTL_H | 2 | #define _ASM_POWERPC_IOCTL_H |
3 | 3 | ||
4 | |||
5 | /* | ||
6 | * this was copied from the alpha as it's a bit cleaner there. | ||
7 | * -- Cort | ||
8 | */ | ||
9 | |||
10 | #define _IOC_NRBITS 8 | ||
11 | #define _IOC_TYPEBITS 8 | ||
12 | #define _IOC_SIZEBITS 13 | 4 | #define _IOC_SIZEBITS 13 |
13 | #define _IOC_DIRBITS 3 | 5 | #define _IOC_DIRBITS 3 |
14 | 6 | ||
15 | #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) | ||
16 | #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) | ||
17 | #define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) | ||
18 | #define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) | ||
19 | |||
20 | #define _IOC_NRSHIFT 0 | ||
21 | #define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) | ||
22 | #define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) | ||
23 | #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) | ||
24 | |||
25 | /* | ||
26 | * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. | ||
27 | * And this turns out useful to catch old ioctl numbers in header | ||
28 | * files for us. | ||
29 | */ | ||
30 | #define _IOC_NONE 1U | 7 | #define _IOC_NONE 1U |
31 | #define _IOC_READ 2U | 8 | #define _IOC_READ 2U |
32 | #define _IOC_WRITE 4U | 9 | #define _IOC_WRITE 4U |
33 | 10 | ||
34 | #define _IOC(dir,type,nr,size) \ | 11 | #include <asm-generic/ioctl.h> |
35 | (((dir) << _IOC_DIRSHIFT) | \ | ||
36 | ((type) << _IOC_TYPESHIFT) | \ | ||
37 | ((nr) << _IOC_NRSHIFT) | \ | ||
38 | ((size) << _IOC_SIZESHIFT)) | ||
39 | |||
40 | /* provoke compile error for invalid uses of size argument */ | ||
41 | extern unsigned int __invalid_size_argument_for_IOC; | ||
42 | #define _IOC_TYPECHECK(t) \ | ||
43 | ((sizeof(t) == sizeof(t[1]) && \ | ||
44 | sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ | ||
45 | sizeof(t) : __invalid_size_argument_for_IOC) | ||
46 | |||
47 | /* used to create numbers */ | ||
48 | #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) | ||
49 | #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) | ||
50 | #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) | ||
51 | #define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) | ||
52 | #define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) | ||
53 | #define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) | ||
54 | #define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) | ||
55 | |||
56 | /* used to decode them.. */ | ||
57 | #define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) | ||
58 | #define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) | ||
59 | #define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) | ||
60 | #define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) | ||
61 | |||
62 | /* various drivers, such as the pcmcia stuff, need these... */ | ||
63 | #define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) | ||
64 | #define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) | ||
65 | #define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) | ||
66 | #define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) | ||
67 | #define IOCSIZE_SHIFT (_IOC_SIZESHIFT) | ||
68 | 12 | ||
69 | #endif /* _ASM_POWERPC_IOCTL_H */ | 13 | #endif /* _ASM_POWERPC_IOCTL_H */ |
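With the remaining overrides (_IOC_SIZEBITS = 13, _IOC_DIRBITS = 3, and the non-zero _IOC_NONE), the generic <asm-generic/ioctl.h> packing produces the same numbers the removed code did. A worked example of the arithmetic, assuming a 32-bit int:

/*
 * Field shifts derived from the generic header:
 *   _IOC_NRSHIFT   = 0
 *   _IOC_TYPESHIFT = 0 + 8   = 8
 *   _IOC_SIZESHIFT = 8 + 8   = 16
 *   _IOC_DIRSHIFT  = 16 + 13 = 29
 *
 * So, for instance:
 *   _IOW('f', 3, int) = (_IOC_WRITE << 29) | ('f' << 8) | (3 << 0) | (sizeof(int) << 16)
 *                     = 0x80000000 | 0x00006600 | 0x3 | 0x00040000
 *                     = 0x80046603
 */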
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h index 852e15f51a1e..51ecfef8d843 100644 --- a/include/asm-powerpc/iommu.h +++ b/include/asm-powerpc/iommu.h | |||
@@ -79,11 +79,13 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); | |||
79 | extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, | 79 | extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, |
80 | int nid); | 80 | int nid); |
81 | 81 | ||
82 | extern int iommu_map_sg(struct device *dev, struct scatterlist *sglist, | 82 | extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, |
83 | int nelems, unsigned long mask, | 83 | struct scatterlist *sglist, int nelems, |
84 | enum dma_data_direction direction); | 84 | unsigned long mask, enum dma_data_direction direction, |
85 | struct dma_attrs *attrs); | ||
85 | extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | 86 | extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, |
86 | int nelems, enum dma_data_direction direction); | 87 | int nelems, enum dma_data_direction direction, |
88 | struct dma_attrs *attrs); | ||
87 | 89 | ||
88 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | 90 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, |
89 | size_t size, dma_addr_t *dma_handle, | 91 | size_t size, dma_addr_t *dma_handle, |
@@ -92,9 +94,11 @@ extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, | |||
92 | void *vaddr, dma_addr_t dma_handle); | 94 | void *vaddr, dma_addr_t dma_handle); |
93 | extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, | 95 | extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, |
94 | void *vaddr, size_t size, unsigned long mask, | 96 | void *vaddr, size_t size, unsigned long mask, |
95 | enum dma_data_direction direction); | 97 | enum dma_data_direction direction, |
98 | struct dma_attrs *attrs); | ||
96 | extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, | 99 | extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, |
97 | size_t size, enum dma_data_direction direction); | 100 | size_t size, enum dma_data_direction direction, |
101 | struct dma_attrs *attrs); | ||
98 | 102 | ||
99 | extern void iommu_init_early_pSeries(void); | 103 | extern void iommu_init_early_pSeries(void); |
100 | extern void iommu_init_early_iSeries(void); | 104 | extern void iommu_init_early_iSeries(void); |
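The iommu_* primitives now carry a struct dma_attrs * through from the DMA mapping layer. A hedged caller sketch; the table parameter and the DMA mask value are assumptions:

#include <linux/dma-mapping.h>
#include <asm/iommu.h>

struct dma_attrs;	/* opaque here; defined by the DMA attributes API */

static dma_addr_t example_map_single(struct device *dev,
				     struct iommu_table *tbl,
				     void *vaddr, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	/* ~0UL stands in for the real per-device DMA mask */
	return iommu_map_single(dev, tbl, vaddr, size, ~0UL, dir, attrs);
}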
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 5089deb8fec3..1ef8e304e0ea 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h | |||
@@ -619,6 +619,19 @@ struct pt_regs; | |||
619 | 619 | ||
620 | #define __ARCH_HAS_DO_SOFTIRQ | 620 | #define __ARCH_HAS_DO_SOFTIRQ |
621 | 621 | ||
622 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | ||
623 | /* | ||
624 | * Per-cpu stacks for handling critical, debug and machine check | ||
625 | * level interrupts. | ||
626 | */ | ||
627 | extern struct thread_info *critirq_ctx[NR_CPUS]; | ||
628 | extern struct thread_info *dbgirq_ctx[NR_CPUS]; | ||
629 | extern struct thread_info *mcheckirq_ctx[NR_CPUS]; | ||
630 | extern void exc_lvl_ctx_init(void); | ||
631 | #else | ||
632 | #define exc_lvl_ctx_init() | ||
633 | #endif | ||
634 | |||
622 | #ifdef CONFIG_IRQSTACKS | 635 | #ifdef CONFIG_IRQSTACKS |
623 | /* | 636 | /* |
624 | * Per-cpu stacks for handling hard and soft interrupts. | 637 | * Per-cpu stacks for handling hard and soft interrupts. |
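The empty #define lets common setup code call the initializer unconditionally; only BOOKE/40x builds get the real per-cpu exception-level stacks. Illustrative call site (the surrounding function is an assumption):

#include <linux/init.h>

static void __init example_arch_irq_init(void)
{
	exc_lvl_ctx_init();	/* no-op unless CONFIG_BOOKE or CONFIG_40x */
	/* ... remaining interrupt controller setup ... */
}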
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h index 701857bc8e24..acdcdc66f1b6 100644 --- a/include/asm-powerpc/kexec.h +++ b/include/asm-powerpc/kexec.h | |||
@@ -34,6 +34,8 @@ | |||
34 | #ifndef __ASSEMBLY__ | 34 | #ifndef __ASSEMBLY__ |
35 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
36 | 36 | ||
37 | typedef void (*crash_shutdown_t)(void); | ||
38 | |||
37 | #ifdef CONFIG_KEXEC | 39 | #ifdef CONFIG_KEXEC |
38 | 40 | ||
39 | #ifdef __powerpc64__ | 41 | #ifdef __powerpc64__ |
@@ -123,7 +125,6 @@ struct pt_regs; | |||
123 | extern void default_machine_kexec(struct kimage *image); | 125 | extern void default_machine_kexec(struct kimage *image); |
124 | extern int default_machine_kexec_prepare(struct kimage *image); | 126 | extern int default_machine_kexec_prepare(struct kimage *image); |
125 | extern void default_machine_crash_shutdown(struct pt_regs *regs); | 127 | extern void default_machine_crash_shutdown(struct pt_regs *regs); |
126 | typedef void (*crash_shutdown_t)(void); | ||
127 | extern int crash_shutdown_register(crash_shutdown_t handler); | 128 | extern int crash_shutdown_register(crash_shutdown_t handler); |
128 | extern int crash_shutdown_unregister(crash_shutdown_t handler); | 129 | extern int crash_shutdown_unregister(crash_shutdown_t handler); |
129 | 130 | ||
@@ -143,6 +144,16 @@ static inline int overlaps_crashkernel(unsigned long start, unsigned long size) | |||
143 | 144 | ||
144 | static inline void reserve_crashkernel(void) { ; } | 145 | static inline void reserve_crashkernel(void) { ; } |
145 | 146 | ||
147 | static inline int crash_shutdown_register(crash_shutdown_t handler) | ||
148 | { | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static inline int crash_shutdown_unregister(crash_shutdown_t handler) | ||
153 | { | ||
154 | return 0; | ||
155 | } | ||
156 | |||
146 | #endif /* CONFIG_KEXEC */ | 157 | #endif /* CONFIG_KEXEC */ |
147 | #endif /* ! __ASSEMBLY__ */ | 158 | #endif /* ! __ASSEMBLY__ */ |
148 | #endif /* __KERNEL__ */ | 159 | #endif /* __KERNEL__ */ |
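crash_shutdown_t moves out of the CONFIG_KEXEC block and the register/unregister calls gain stubs, so callers can be written once. Illustrative only; the handler body and init/exit wiring are assumptions:

#include <linux/init.h>
#include <asm/kexec.h>

static void example_crash_quiesce(void)
{
	/* quiesce hardware so the kdump kernel can take over cleanly */
}

static int __init example_crash_init(void)
{
	/* succeeds trivially when CONFIG_KEXEC is not set */
	return crash_shutdown_register(example_crash_quiesce);
}

static void __exit example_crash_exit(void)
{
	crash_shutdown_unregister(example_crash_quiesce);
}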
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 54ed64df95b8..989922621e35 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h | |||
@@ -262,6 +262,7 @@ struct machdep_calls { | |||
262 | #endif | 262 | #endif |
263 | }; | 263 | }; |
264 | 264 | ||
265 | extern void e500_idle(void); | ||
265 | extern void power4_idle(void); | 266 | extern void power4_idle(void); |
266 | extern void power4_cpu_offline_powersave(void); | 267 | extern void power4_cpu_offline_powersave(void); |
267 | extern void ppc6xx_idle(void); | 268 | extern void ppc6xx_idle(void); |
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h index 24cf664a8295..9209f755763e 100644 --- a/include/asm-powerpc/mman.h +++ b/include/asm-powerpc/mman.h | |||
@@ -10,6 +10,8 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define PROT_SAO 0x10 /* Strong Access Ordering */ | ||
14 | |||
13 | #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ | 15 | #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ |
14 | #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ | 16 | #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ |
15 | #define MAP_LOCKED 0x80 | 17 | #define MAP_LOCKED 0x80 |
@@ -24,4 +26,38 @@ | |||
24 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | 26 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ |
25 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | 27 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ |
26 | 28 | ||
29 | #ifdef __KERNEL__ | ||
30 | #ifdef CONFIG_PPC64 | ||
31 | |||
32 | #include <asm/cputable.h> | ||
33 | #include <linux/mm.h> | ||
34 | |||
35 | /* | ||
36 | * This file is included by linux/mman.h, so we can't use calc_vm_prot_bits() | ||
37 | * here. How important is the optimization? | ||
38 | */ | ||
39 | static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) | ||
40 | { | ||
41 | return (prot & PROT_SAO) ? VM_SAO : 0; | ||
42 | } | ||
43 | #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) | ||
44 | |||
45 | static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) | ||
46 | { | ||
47 | return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0; | ||
48 | } | ||
49 | #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) | ||
50 | |||
51 | static inline int arch_validate_prot(unsigned long prot) | ||
52 | { | ||
53 | if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) | ||
54 | return 0; | ||
55 | if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) | ||
56 | return 0; | ||
57 | return 1; | ||
58 | } | ||
59 | #define arch_validate_prot(prot) arch_validate_prot(prot) | ||
60 | |||
61 | #endif /* CONFIG_PPC64 */ | ||
62 | #endif /* __KERNEL__ */ | ||
27 | #endif /* _ASM_POWERPC_MMAN_H */ | 63 | #endif /* _ASM_POWERPC_MMAN_H */ |
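PROT_SAO is a new userspace-visible protection flag: arch_calc_vm_prot_bits() maps it to VM_SAO and arch_validate_prot() lets mprotect() reject it on CPUs without CPU_FTR_SAO. A userspace-flavoured sketch; defining PROT_SAO locally assumes an older libc header:

#include <stddef.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10		/* matches the kernel definition above */
#endif

static void *map_sao_buffer(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_SAO,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return (p == MAP_FAILED) ? NULL : p;
}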
diff --git a/include/asm-powerpc/mmu-hash32.h b/include/asm-powerpc/mmu-hash32.h index 6e21ca618ec3..16b1a1e77e64 100644 --- a/include/asm-powerpc/mmu-hash32.h +++ b/include/asm-powerpc/mmu-hash32.h | |||
@@ -28,24 +28,18 @@ | |||
28 | #define BPP_RW 0x02 /* Read/write */ | 28 | #define BPP_RW 0x02 /* Read/write */ |
29 | 29 | ||
30 | #ifndef __ASSEMBLY__ | 30 | #ifndef __ASSEMBLY__ |
31 | /* Contort a phys_addr_t into the right format/bits for a BAT */ | ||
32 | #ifdef CONFIG_PHYS_64BIT | ||
33 | #define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \ | ||
34 | ((x & 0x0000000e00000000ULL) >> 24) | \ | ||
35 | ((x & 0x0000000100000000ULL) >> 30))) | ||
36 | #else | ||
37 | #define BAT_PHYS_ADDR(x) (x) | ||
38 | #endif | ||
39 | |||
31 | struct ppc_bat { | 40 | struct ppc_bat { |
32 | struct { | 41 | u32 batu; |
33 | unsigned long bepi:15; /* Effective page index (virtual address) */ | 42 | u32 batl; |
34 | unsigned long :4; /* Unused */ | ||
35 | unsigned long bl:11; /* Block size mask */ | ||
36 | unsigned long vs:1; /* Supervisor valid */ | ||
37 | unsigned long vp:1; /* User valid */ | ||
38 | } batu; /* Upper register */ | ||
39 | struct { | ||
40 | unsigned long brpn:15; /* Real page index (physical address) */ | ||
41 | unsigned long :10; /* Unused */ | ||
42 | unsigned long w:1; /* Write-thru cache */ | ||
43 | unsigned long i:1; /* Cache inhibit */ | ||
44 | unsigned long m:1; /* Memory coherence */ | ||
45 | unsigned long g:1; /* Guarded (MBZ in IBAT) */ | ||
46 | unsigned long :1; /* Unused */ | ||
47 | unsigned long pp:2; /* Page access protections */ | ||
48 | } batl; /* Lower register */ | ||
49 | }; | 43 | }; |
50 | #endif /* !__ASSEMBLY__ */ | 44 | #endif /* !__ASSEMBLY__ */ |
51 | 45 | ||
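struct ppc_bat now holds the raw upper/lower register words. A hedged helper showing where the old bitfields land in those words, using the widths from the removed definition (MSB-first allocation on big-endian); the helper itself is illustrative, not part of the patch:

#include <linux/types.h>
#include <asm/mmu.h>	/* pulls in mmu-hash32.h on classic 32-bit hash MMUs */

static inline void example_bat_pack(struct ppc_bat *bat,
				    u32 bepi, u32 bl, u32 vs, u32 vp,
				    u32 brpn, u32 wimg, u32 pp)
{
	/* batu: bepi in bits 31..17, bl in 12..2, vs bit 1, vp bit 0 */
	bat->batu = (bepi << 17) | (bl << 2) | (vs << 1) | vp;
	/* batl: brpn in bits 31..17, W/I/M/G in bits 6..3, pp in bits 1..0 */
	bat->batl = (brpn << 17) | (wimg << 3) | pp;
}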
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h index 39c5c5f62bf5..d1dc16afb118 100644 --- a/include/asm-powerpc/mmu-hash64.h +++ b/include/asm-powerpc/mmu-hash64.h | |||
@@ -182,6 +182,7 @@ extern int mmu_io_psize; | |||
182 | extern int mmu_kernel_ssize; | 182 | extern int mmu_kernel_ssize; |
183 | extern int mmu_highuser_ssize; | 183 | extern int mmu_highuser_ssize; |
184 | extern u16 mmu_slb_size; | 184 | extern u16 mmu_slb_size; |
185 | extern unsigned long tce_alloc_start, tce_alloc_end; | ||
185 | 186 | ||
186 | /* | 187 | /* |
187 | * If the processor supports 64k normal pages but not 64k cache | 188 | * If the processor supports 64k normal pages but not 64k cache |
diff --git a/include/asm-powerpc/mpc6xx.h b/include/asm-powerpc/mpc6xx.h new file mode 100644 index 000000000000..effc2291beb2 --- /dev/null +++ b/include/asm-powerpc/mpc6xx.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ASM_POWERPC_MPC6xx_H | ||
2 | #define __ASM_POWERPC_MPC6xx_H | ||
3 | |||
4 | void mpc6xx_enter_standby(void); | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index a4d0f876b427..fe566a348a86 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h | |||
@@ -353,6 +353,8 @@ struct mpic | |||
353 | #define MPIC_ENABLE_MCK 0x00000200 | 353 | #define MPIC_ENABLE_MCK 0x00000200 |
354 | /* Disable bias among target selection, spread interrupts evenly */ | 354 | /* Disable bias among target selection, spread interrupts evenly */ |
355 | #define MPIC_NO_BIAS 0x00000400 | 355 | #define MPIC_NO_BIAS 0x00000400 |
356 | /* Ignore NIRQS as reported by FRR */ | ||
357 | #define MPIC_BROKEN_FRR_NIRQS 0x00000800 | ||
356 | 358 | ||
357 | /* MPIC HW modification ID */ | 359 | /* MPIC HW modification ID */ |
358 | #define MPIC_REGSET_MASK 0xf0000000 | 360 | #define MPIC_REGSET_MASK 0xf0000000 |
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h index 6526e139a463..3c123990ca2e 100644 --- a/include/asm-powerpc/of_device.h +++ b/include/asm-powerpc/of_device.h | |||
@@ -21,8 +21,6 @@ extern struct of_device *of_device_alloc(struct device_node *np, | |||
21 | const char *bus_id, | 21 | const char *bus_id, |
22 | struct device *parent); | 22 | struct device *parent); |
23 | 23 | ||
24 | extern ssize_t of_device_get_modalias(struct of_device *ofdev, | ||
25 | char *str, ssize_t len); | ||
26 | extern int of_device_uevent(struct device *dev, | 24 | extern int of_device_uevent(struct device *dev, |
27 | struct kobj_uevent_env *env); | 25 | struct kobj_uevent_env *env); |
28 | 26 | ||
diff --git a/include/asm-powerpc/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h index ea6cfb8efb84..e482e5352e69 100644 --- a/include/asm-powerpc/pSeries_reconfig.h +++ b/include/asm-powerpc/pSeries_reconfig.h | |||
@@ -9,8 +9,10 @@ | |||
9 | * added or removed on pSeries systems. | 9 | * added or removed on pSeries systems. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define PSERIES_RECONFIG_ADD 0x0001 | 12 | #define PSERIES_RECONFIG_ADD 0x0001 |
13 | #define PSERIES_RECONFIG_REMOVE 0x0002 | 13 | #define PSERIES_RECONFIG_REMOVE 0x0002 |
14 | #define PSERIES_DRCONF_MEM_ADD 0x0003 | ||
15 | #define PSERIES_DRCONF_MEM_REMOVE 0x0004 | ||
14 | 16 | ||
15 | #ifdef CONFIG_PPC_PSERIES | 17 | #ifdef CONFIG_PPC_PSERIES |
16 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); | 18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); |
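The two new actions cover dynamic-reconfiguration memory add/remove events alongside the existing device-node add/remove. A hedged sketch of a consumer; the callback body and registration are illustrative:

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/pSeries_reconfig.h>

static int example_reconfig_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	switch (action) {
	case PSERIES_DRCONF_MEM_ADD:
		/* react to dynamically added memory */
		break;
	case PSERIES_DRCONF_MEM_REMOVE:
		/* react to memory about to go away */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_reconfig_nb = {
	.notifier_call = example_reconfig_notify,
};

static int __init example_reconfig_init(void)
{
	return pSeries_reconfig_notifier_register(&example_reconfig_nb);
}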
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h index 25af4fc8daf4..02fd80710e9d 100644 --- a/include/asm-powerpc/page_64.h +++ b/include/asm-powerpc/page_64.h | |||
@@ -126,16 +126,22 @@ extern unsigned int get_slice_psize(struct mm_struct *mm, | |||
126 | 126 | ||
127 | extern void slice_init_context(struct mm_struct *mm, unsigned int psize); | 127 | extern void slice_init_context(struct mm_struct *mm, unsigned int psize); |
128 | extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); | 128 | extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); |
129 | extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, | ||
130 | unsigned long len, unsigned int psize); | ||
131 | |||
129 | #define slice_mm_new_context(mm) ((mm)->context.id == 0) | 132 | #define slice_mm_new_context(mm) ((mm)->context.id == 0) |
130 | 133 | ||
131 | #endif /* __ASSEMBLY__ */ | 134 | #endif /* __ASSEMBLY__ */ |
132 | #else | 135 | #else |
133 | #define slice_init() | 136 | #define slice_init() |
137 | #define get_slice_psize(mm, addr) ((mm)->context.user_psize) | ||
134 | #define slice_set_user_psize(mm, psize) \ | 138 | #define slice_set_user_psize(mm, psize) \ |
135 | do { \ | 139 | do { \ |
136 | (mm)->context.user_psize = (psize); \ | 140 | (mm)->context.user_psize = (psize); \ |
137 | (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ | 141 | (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ |
138 | } while (0) | 142 | } while (0) |
143 | #define slice_set_range_psize(mm, start, len, psize) \ | ||
144 | slice_set_user_psize((mm), (psize)) | ||
139 | #define slice_mm_new_context(mm) 1 | 145 | #define slice_mm_new_context(mm) 1 |
140 | #endif /* CONFIG_PPC_MM_SLICES */ | 146 | #endif /* CONFIG_PPC_MM_SLICES */ |
141 | 147 | ||
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h index b95d033ae6e6..ae2ea803a0f2 100644 --- a/include/asm-powerpc/pci-bridge.h +++ b/include/asm-powerpc/pci-bridge.h | |||
@@ -92,12 +92,15 @@ struct pci_controller { | |||
92 | * anything but the PHB. Only allow talking to the PHB if this is | 92 | * anything but the PHB. Only allow talking to the PHB if this is |
93 | * set. | 93 | * set. |
94 | * BIG_ENDIAN - cfg_addr is a big endian register | 94 | * BIG_ENDIAN - cfg_addr is a big endian register |
95 | * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs on | ||
96 | * the PLB4. Effectively disable MRM commands by setting this. | ||
95 | */ | 97 | */ |
96 | #define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 | 98 | #define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 |
97 | #define PPC_INDIRECT_TYPE_EXT_REG 0x00000002 | 99 | #define PPC_INDIRECT_TYPE_EXT_REG 0x00000002 |
98 | #define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004 | 100 | #define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004 |
99 | #define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 | 101 | #define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 |
100 | #define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010 | 102 | #define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010 |
103 | #define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020 | ||
101 | u32 indirect_type; | 104 | u32 indirect_type; |
102 | #endif /* !CONFIG_PPC64 */ | 105 | #endif /* !CONFIG_PPC64 */ |
103 | /* Currently, we limit ourselves to 1 IO range and 3 mem | 106 | /* Currently, we limit ourselves to 1 IO range and 3 mem |
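The new flag is just another bit in indirect_type: platform setup that knows about the 440EPx/GRx PLB4 erratum sets it, and the PCI code checks it before issuing Memory-Read-Multiple. Minimal illustrative check (the helper is not from the patch; indirect_type only exists on 32-bit):

#include <linux/types.h>
#include <asm/pci-bridge.h>

static inline bool example_mrm_usable(struct pci_controller *hose)
{
	return !(hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM);
}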
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h index 818e2abc81e2..fd2090dc1dce 100644 --- a/include/asm-powerpc/pgtable-4k.h +++ b/include/asm-powerpc/pgtable-4k.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
42 | 42 | ||
43 | /* PTE bits */ | 43 | /* PTE bits */ |
44 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | ||
44 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | 45 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ |
45 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | 46 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ |
46 | #define _PAGE_F_SECOND _PAGE_SECONDARY | 47 | #define _PAGE_F_SECOND _PAGE_SECONDARY |
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h index 1cbd6b377eea..c5007712473f 100644 --- a/include/asm-powerpc/pgtable-64k.h +++ b/include/asm-powerpc/pgtable-64k.h | |||
@@ -75,6 +75,20 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | |||
75 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ | 75 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ |
76 | #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ | 76 | #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ |
77 | 77 | ||
78 | /* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead, | ||
79 | * we set that to be the whole sub-bits mask. The C code will only | ||
80 | * test this, so a multi-bit mask will work. For combo pages, this | ||
81 | * is equivalent since, effectively, the old _PAGE_HASHPTE was an OR of | ||
82 | * all the sub bits. For real 64k pages, we now have the assembly set | ||
83 | * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap | ||
84 | * that mask. This is fine as long as the HIDX bits are never set on | ||
85 | * a PTE that isn't hashed, which is the case today. | ||
86 | * | ||
87 | * One remaining nit is the huge page C code, which does the hashing | ||
88 | * in C: there we need to provide which bit to use. | ||
89 | */ | ||
90 | #define _PAGE_HASHPTE _PAGE_HPTE_SUB | ||
91 | |||
78 | /* Note the full page bits must be in the same location as for normal | 92 | /* Note the full page bits must be in the same location as for normal |
79 | * 4k pages as the same assembly will be used to insert 64K pages | 93 | * 4k pages as the same assembly will be used to insert 64K pages |
80 | * whether the kernel has CONFIG_PPC_64K_PAGES or not | 94 | * whether the kernel has CONFIG_PPC_64K_PAGES or not |
@@ -83,8 +97,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | |||
83 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ | 97 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ |
84 | 98 | ||
85 | /* PTE flags to conserve for HPTE identification */ | 99 | /* PTE flags to conserve for HPTE identification */ |
86 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\ | 100 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO) |
87 | _PAGE_COMBO) | ||
88 | 101 | ||
89 | /* Shift to put page number into pte. | 102 | /* Shift to put page number into pte. |
90 | * | 103 | * |
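The comment above boils down to: C code only ever tests _PAGE_HASHPTE as a boolean, so defining it as the whole _PAGE_HPTE_SUB mask changes nothing for those tests. Sketch only (the helper name is an assumption):

#include <asm/pgtable.h>

static inline int example_pte_was_hashed(pte_t pte)
{
	/* true if any HPTE sub-bit is set, exactly as before */
	return (pte_val(pte) & _PAGE_HASHPTE) != 0;
}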
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h index c08e714d0c42..73015f0139de 100644 --- a/include/asm-powerpc/pgtable-ppc32.h +++ b/include/asm-powerpc/pgtable-ppc32.h | |||
@@ -182,6 +182,9 @@ extern int icache_44x_need_flush; | |||
182 | #define _PMD_SIZE_16M 0x0e0 | 182 | #define _PMD_SIZE_16M 0x0e0 |
183 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) | 183 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) |
184 | 184 | ||
185 | /* Until my rework is finished, 40x still needs atomic PTE updates */ | ||
186 | #define PTE_ATOMIC_UPDATES 1 | ||
187 | |||
185 | #elif defined(CONFIG_44x) | 188 | #elif defined(CONFIG_44x) |
186 | /* | 189 | /* |
187 | * Definitions for PPC440 | 190 | * Definitions for PPC440 |
@@ -253,17 +256,17 @@ extern int icache_44x_need_flush; | |||
253 | */ | 256 | */ |
254 | 257 | ||
255 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ | 258 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ |
256 | #define _PAGE_RW 0x00000002 /* S: Write permission */ | 259 | #define _PAGE_RW 0x00000002 /* S: Write permission */ |
257 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ | 260 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ |
261 | #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */ | ||
258 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ | 262 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ |
259 | #define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ | 263 | #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ |
260 | #define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ | 264 | #define _PAGE_USER 0x00000040 /* S: User page */ |
261 | #define _PAGE_USER 0x00000040 /* S: User page */ | 265 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ |
262 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ | 266 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ |
263 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ | 267 | #define _PAGE_COHERENT 0x00000200 /* H: M bit */ |
264 | #define _PAGE_DIRTY 0x00000200 /* S: Page dirty */ | 268 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ |
265 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ | 269 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ |
266 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ | ||
267 | 270 | ||
268 | /* TODO: Add large page lowmem mapping support */ | 271 | /* TODO: Add large page lowmem mapping support */ |
269 | #define _PMD_PRESENT 0 | 272 | #define _PMD_PRESENT 0 |
@@ -273,6 +276,7 @@ extern int icache_44x_need_flush; | |||
273 | /* ERPN in a PTE never gets cleared, ignore it */ | 276 | /* ERPN in a PTE never gets cleared, ignore it */ |
274 | #define _PTE_NONE_MASK 0xffffffff00000000ULL | 277 | #define _PTE_NONE_MASK 0xffffffff00000000ULL |
275 | 278 | ||
279 | |||
276 | #elif defined(CONFIG_FSL_BOOKE) | 280 | #elif defined(CONFIG_FSL_BOOKE) |
277 | /* | 281 | /* |
278 | MMU Assist Register 3: | 282 | MMU Assist Register 3: |
@@ -315,6 +319,9 @@ extern int icache_44x_need_flush; | |||
315 | #define _PMD_PRESENT_MASK (PAGE_MASK) | 319 | #define _PMD_PRESENT_MASK (PAGE_MASK) |
316 | #define _PMD_BAD (~PAGE_MASK) | 320 | #define _PMD_BAD (~PAGE_MASK) |
317 | 321 | ||
322 | /* Until my rework is finished, FSL BookE still needs atomic PTE updates */ | ||
323 | #define PTE_ATOMIC_UPDATES 1 | ||
324 | |||
318 | #elif defined(CONFIG_8xx) | 325 | #elif defined(CONFIG_8xx) |
319 | /* Definitions for 8xx embedded chips. */ | 326 | /* Definitions for 8xx embedded chips. */ |
320 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ | 327 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ |
@@ -345,6 +352,9 @@ extern int icache_44x_need_flush; | |||
345 | 352 | ||
346 | #define _PTE_NONE_MASK _PAGE_ACCESSED | 353 | #define _PTE_NONE_MASK _PAGE_ACCESSED |
347 | 354 | ||
355 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ | ||
356 | #define PTE_ATOMIC_UPDATES 1 | ||
357 | |||
348 | #else /* CONFIG_6xx */ | 358 | #else /* CONFIG_6xx */ |
349 | /* Definitions for 60x, 740/750, etc. */ | 359 | /* Definitions for 60x, 740/750, etc. */ |
350 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ | 360 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ |
@@ -365,6 +375,10 @@ extern int icache_44x_need_flush; | |||
365 | #define _PMD_PRESENT 0 | 375 | #define _PMD_PRESENT 0 |
366 | #define _PMD_PRESENT_MASK (PAGE_MASK) | 376 | #define _PMD_PRESENT_MASK (PAGE_MASK) |
367 | #define _PMD_BAD (~PAGE_MASK) | 377 | #define _PMD_BAD (~PAGE_MASK) |
378 | |||
379 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
380 | #define PTE_ATOMIC_UPDATES 1 | ||
381 | |||
368 | #endif | 382 | #endif |
369 | 383 | ||
370 | /* | 384 | /* |
@@ -557,9 +571,11 @@ extern void add_hash_page(unsigned context, unsigned long va, | |||
557 | * low PTE word since we expect ALL flag bits to be there | 571 | * low PTE word since we expect ALL flag bits to be there |
558 | */ | 572 | */ |
559 | #ifndef CONFIG_PTE_64BIT | 573 | #ifndef CONFIG_PTE_64BIT |
560 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, | 574 | static inline unsigned long pte_update(pte_t *p, |
575 | unsigned long clr, | ||
561 | unsigned long set) | 576 | unsigned long set) |
562 | { | 577 | { |
578 | #ifdef PTE_ATOMIC_UPDATES | ||
563 | unsigned long old, tmp; | 579 | unsigned long old, tmp; |
564 | 580 | ||
565 | __asm__ __volatile__("\ | 581 | __asm__ __volatile__("\ |
@@ -572,16 +588,26 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, | |||
572 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | 588 | : "=&r" (old), "=&r" (tmp), "=m" (*p) |
573 | : "r" (p), "r" (clr), "r" (set), "m" (*p) | 589 | : "r" (p), "r" (clr), "r" (set), "m" (*p) |
574 | : "cc" ); | 590 | : "cc" ); |
591 | #else /* PTE_ATOMIC_UPDATES */ | ||
592 | unsigned long old = pte_val(*p); | ||
593 | *p = __pte((old & ~clr) | set); | ||
594 | #endif /* !PTE_ATOMIC_UPDATES */ | ||
595 | |||
575 | #ifdef CONFIG_44x | 596 | #ifdef CONFIG_44x |
576 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) | 597 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) |
577 | icache_44x_need_flush = 1; | 598 | icache_44x_need_flush = 1; |
578 | #endif | 599 | #endif |
579 | return old; | 600 | return old; |
580 | } | 601 | } |
581 | #else | 602 | #else /* CONFIG_PTE_64BIT */ |
582 | static inline unsigned long long pte_update(pte_t *p, unsigned long clr, | 603 | /* TODO: Change that to only modify the low word and move set_pte_at() |
583 | unsigned long set) | 604 | * out of line |
605 | */ | ||
606 | static inline unsigned long long pte_update(pte_t *p, | ||
607 | unsigned long clr, | ||
608 | unsigned long set) | ||
584 | { | 609 | { |
610 | #ifdef PTE_ATOMIC_UPDATES | ||
585 | unsigned long long old; | 611 | unsigned long long old; |
586 | unsigned long tmp; | 612 | unsigned long tmp; |
587 | 613 | ||
@@ -596,13 +622,18 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, | |||
596 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | 622 | : "=&r" (old), "=&r" (tmp), "=m" (*p) |
597 | : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) | 623 | : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) |
598 | : "cc" ); | 624 | : "cc" ); |
625 | #else /* PTE_ATOMIC_UPDATES */ | ||
626 | unsigned long long old = pte_val(*p); | ||
627 | *p = __pte((old & ~(unsigned long long)clr) | set); | ||
628 | #endif /* !PTE_ATOMIC_UPDATES */ | ||
629 | |||
599 | #ifdef CONFIG_44x | 630 | #ifdef CONFIG_44x |
600 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) | 631 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) |
601 | icache_44x_need_flush = 1; | 632 | icache_44x_need_flush = 1; |
602 | #endif | 633 | #endif |
603 | return old; | 634 | return old; |
604 | } | 635 | } |
605 | #endif | 636 | #endif /* CONFIG_PTE_64BIT */ |
606 | 637 | ||
607 | /* | 638 | /* |
608 | * set_pte stores a linux PTE into the linux page table. | 639 | * set_pte stores a linux PTE into the linux page table. |
@@ -620,8 +651,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
620 | } | 651 | } |
621 | 652 | ||
622 | /* | 653 | /* |
623 | * 2.6 calles this without flushing the TLB entry, this is wrong | 654 | * 2.6 calls this without flushing the TLB entry; this is wrong |
624 | * for our hash-based implementation, we fix that up here | 655 | * for our hash-based implementation, we fix that up here. |
625 | */ | 656 | */ |
626 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 657 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
627 | static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) | 658 | static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) |
@@ -652,6 +683,12 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
652 | { | 683 | { |
653 | pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); | 684 | pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); |
654 | } | 685 | } |
686 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
687 | unsigned long addr, pte_t *ptep) | ||
688 | { | ||
689 | ptep_set_wrprotect(mm, addr, ptep); | ||
690 | } | ||
691 | |||
655 | 692 | ||
656 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 693 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
657 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | 694 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) |
@@ -665,7 +702,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | |||
665 | ({ \ | 702 | ({ \ |
666 | int __changed = !pte_same(*(__ptep), __entry); \ | 703 | int __changed = !pte_same(*(__ptep), __entry); \ |
667 | if (__changed) { \ | 704 | if (__changed) { \ |
668 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ | 705 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ |
669 | flush_tlb_page_nohash(__vma, __address); \ | 706 | flush_tlb_page_nohash(__vma, __address); \ |
670 | } \ | 707 | } \ |
671 | __changed; \ | 708 | __changed; \ |
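Whichever pte_update() variant is compiled in (the atomic lwarx/stwcx. sequence or the plain read-modify-write), callers use it the same way: pass the bits to clear and set, and inspect the returned old value. A simplified sketch of the usual pattern, mirroring the __ptep_test_and_clear_young shown above:

#include <asm/pgtable.h>

static inline int example_test_and_clear_young(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}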
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h index 7686569a0bef..ab98a9c80b28 100644 --- a/include/asm-powerpc/pgtable-ppc64.h +++ b/include/asm-powerpc/pgtable-ppc64.h | |||
@@ -91,9 +91,11 @@ | |||
91 | #define _PAGE_DIRTY 0x0080 /* C: page changed */ | 91 | #define _PAGE_DIRTY 0x0080 /* C: page changed */ |
92 | #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ | 92 | #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ |
93 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ | 93 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ |
94 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | ||
95 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ | 94 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ |
96 | 95 | ||
96 | /* Strong Access Ordering */ | ||
97 | #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) | ||
98 | |||
97 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) | 99 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) |
98 | 100 | ||
99 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) | 101 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) |
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h index 2dbd4e7884fa..0966899d974b 100644 --- a/include/asm-powerpc/ppc_asm.h +++ b/include/asm-powerpc/ppc_asm.h | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/stringify.h> | 7 | #include <linux/stringify.h> |
8 | #include <asm/asm-compat.h> | 8 | #include <asm/asm-compat.h> |
9 | #include <asm/processor.h> | ||
9 | 10 | ||
10 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
11 | #error __FILE__ should only be used in assembler files | 12 | #error __FILE__ should only be used in assembler files |
@@ -73,6 +74,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
73 | REST_10GPRS(22, base) | 74 | REST_10GPRS(22, base) |
74 | #endif | 75 | #endif |
75 | 76 | ||
77 | /* | ||
78 | * Define what the VSX XX1 form instructions will look like, then add | ||
79 | * the 128 bit load store instructions based on that. | ||
80 | */ | ||
81 | #define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ | ||
82 | ((rb) << 11) | (((xs) >> 5))) | ||
83 | |||
84 | #define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) | ||
85 | #define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) | ||
76 | 86 | ||
77 | #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) | 87 | #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) |
78 | #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) | 88 | #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) |
@@ -83,13 +93,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
83 | #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) | 93 | #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) |
84 | #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) | 94 | #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) |
85 | 95 | ||
86 | #define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base) | 96 | #define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) |
87 | #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) | 97 | #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) |
88 | #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) | 98 | #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) |
89 | #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) | 99 | #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) |
90 | #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) | 100 | #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) |
91 | #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) | 101 | #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) |
92 | #define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base) | 102 | #define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) |
93 | #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) | 103 | #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) |
94 | #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) | 104 | #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) |
95 | #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) | 105 | #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) |
@@ -109,6 +119,33 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
109 | #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) | 119 | #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) |
110 | #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) | 120 | #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) |
111 | 121 | ||
122 | /* Save the lower 32 VSRs in the thread VSR region */ | ||
123 | #define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,b,base) | ||
124 | #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) | ||
125 | #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) | ||
126 | #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) | ||
127 | #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) | ||
128 | #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) | ||
129 | #define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base) | ||
130 | #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) | ||
131 | #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) | ||
132 | #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) | ||
133 | #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) | ||
134 | #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) | ||
135 | /* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */ | ||
136 | #define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,b,base) | ||
137 | #define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base) | ||
138 | #define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base) | ||
139 | #define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base) | ||
140 | #define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base) | ||
141 | #define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base) | ||
142 | #define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base) | ||
143 | #define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base) | ||
144 | #define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base) | ||
145 | #define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base) | ||
146 | #define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base) | ||
147 | #define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base) | ||
148 | |||
112 | #define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) | 149 | #define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) |
113 | #define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) | 150 | #define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) |
114 | #define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base) | 151 | #define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base) |
@@ -356,6 +393,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) | |||
356 | #define toreal(rd) | 393 | #define toreal(rd) |
357 | #define fromreal(rd) | 394 | #define fromreal(rd) |
358 | 395 | ||
396 | /* | ||
397 | * We use addis to ensure compatibility with the "classic" ppc versions of | ||
398 | * these macros, which use rs = 0 to get the tophys offset in rd, rather than | ||
399 | * converting the address in r0, and so this version has to do that too | ||
400 | * (i.e. set register rd to 0 when rs == 0). | ||
401 | */ | ||
359 | #define tophys(rd,rs) \ | 402 | #define tophys(rd,rs) \ |
360 | addis rd,rs,0 | 403 | addis rd,rs,0 |
361 | 404 | ||
@@ -533,6 +576,73 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) | |||
533 | #define vr30 30 | 576 | #define vr30 30 |
534 | #define vr31 31 | 577 | #define vr31 31 |
535 | 578 | ||
579 | /* VSX Registers (VSRs) */ | ||
580 | |||
581 | #define vsr0 0 | ||
582 | #define vsr1 1 | ||
583 | #define vsr2 2 | ||
584 | #define vsr3 3 | ||
585 | #define vsr4 4 | ||
586 | #define vsr5 5 | ||
587 | #define vsr6 6 | ||
588 | #define vsr7 7 | ||
589 | #define vsr8 8 | ||
590 | #define vsr9 9 | ||
591 | #define vsr10 10 | ||
592 | #define vsr11 11 | ||
593 | #define vsr12 12 | ||
594 | #define vsr13 13 | ||
595 | #define vsr14 14 | ||
596 | #define vsr15 15 | ||
597 | #define vsr16 16 | ||
598 | #define vsr17 17 | ||
599 | #define vsr18 18 | ||
600 | #define vsr19 19 | ||
601 | #define vsr20 20 | ||
602 | #define vsr21 21 | ||
603 | #define vsr22 22 | ||
604 | #define vsr23 23 | ||
605 | #define vsr24 24 | ||
606 | #define vsr25 25 | ||
607 | #define vsr26 26 | ||
608 | #define vsr27 27 | ||
609 | #define vsr28 28 | ||
610 | #define vsr29 29 | ||
611 | #define vsr30 30 | ||
612 | #define vsr31 31 | ||
613 | #define vsr32 32 | ||
614 | #define vsr33 33 | ||
615 | #define vsr34 34 | ||
616 | #define vsr35 35 | ||
617 | #define vsr36 36 | ||
618 | #define vsr37 37 | ||
619 | #define vsr38 38 | ||
620 | #define vsr39 39 | ||
621 | #define vsr40 40 | ||
622 | #define vsr41 41 | ||
623 | #define vsr42 42 | ||
624 | #define vsr43 43 | ||
625 | #define vsr44 44 | ||
626 | #define vsr45 45 | ||
627 | #define vsr46 46 | ||
628 | #define vsr47 47 | ||
629 | #define vsr48 48 | ||
630 | #define vsr49 49 | ||
631 | #define vsr50 50 | ||
632 | #define vsr51 51 | ||
633 | #define vsr52 52 | ||
634 | #define vsr53 53 | ||
635 | #define vsr54 54 | ||
636 | #define vsr55 55 | ||
637 | #define vsr56 56 | ||
638 | #define vsr57 57 | ||
639 | #define vsr58 58 | ||
640 | #define vsr59 59 | ||
641 | #define vsr60 60 | ||
642 | #define vsr61 61 | ||
643 | #define vsr62 62 | ||
644 | #define vsr63 63 | ||
645 | |||
536 | /* SPE Registers (EVPRs) */ | 646 | /* SPE Registers (EVPRs) */ |
537 | 647 | ||
538 | #define evr0 0 | 648 | #define evr0 0 |
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h index cf83f2d7e2a5..101ed87f7d84 100644 --- a/include/asm-powerpc/processor.h +++ b/include/asm-powerpc/processor.h | |||
@@ -12,6 +12,12 @@ | |||
12 | 12 | ||
13 | #include <asm/reg.h> | 13 | #include <asm/reg.h> |
14 | 14 | ||
15 | #ifdef CONFIG_VSX | ||
16 | #define TS_FPRWIDTH 2 | ||
17 | #else | ||
18 | #define TS_FPRWIDTH 1 | ||
19 | #endif | ||
20 | |||
15 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
16 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
17 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
@@ -78,9 +84,14 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | |||
78 | /* Lazy FPU handling on uni-processor */ | 84 | /* Lazy FPU handling on uni-processor */ |
79 | extern struct task_struct *last_task_used_math; | 85 | extern struct task_struct *last_task_used_math; |
80 | extern struct task_struct *last_task_used_altivec; | 86 | extern struct task_struct *last_task_used_altivec; |
87 | extern struct task_struct *last_task_used_vsx; | ||
81 | extern struct task_struct *last_task_used_spe; | 88 | extern struct task_struct *last_task_used_spe; |
82 | 89 | ||
83 | #ifdef CONFIG_PPC32 | 90 | #ifdef CONFIG_PPC32 |
91 | |||
92 | #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START | ||
93 | #error User TASK_SIZE overlaps with KERNEL_START address | ||
94 | #endif | ||
84 | #define TASK_SIZE (CONFIG_TASK_SIZE) | 95 | #define TASK_SIZE (CONFIG_TASK_SIZE) |
85 | 96 | ||
86 | /* This decides where the kernel will search for a free chunk of vm | 97 | /* This decides where the kernel will search for a free chunk of vm |
@@ -136,6 +147,10 @@ typedef struct { | |||
136 | unsigned long seg; | 147 | unsigned long seg; |
137 | } mm_segment_t; | 148 | } mm_segment_t; |
138 | 149 | ||
150 | #define TS_FPROFFSET 0 | ||
151 | #define TS_VSRLOWOFFSET 1 | ||
152 | #define TS_FPR(i) fpr[i][TS_FPROFFSET] | ||
153 | |||
139 | struct thread_struct { | 154 | struct thread_struct { |
140 | unsigned long ksp; /* Kernel stack pointer */ | 155 | unsigned long ksp; /* Kernel stack pointer */ |
141 | unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ | 156 | unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ |
@@ -152,8 +167,9 @@ struct thread_struct { | |||
152 | unsigned long dbcr0; /* debug control register values */ | 167 | unsigned long dbcr0; /* debug control register values */ |
153 | unsigned long dbcr1; | 168 | unsigned long dbcr1; |
154 | #endif | 169 | #endif |
155 | double fpr[32]; /* Complete floating point set */ | 170 | /* FP and VSX 0-31 register set */ |
156 | struct { /* fpr ... fpscr must be contiguous */ | 171 | double fpr[32][TS_FPRWIDTH]; |
172 | struct { | ||
157 | 173 | ||
158 | unsigned int pad; | 174 | unsigned int pad; |
159 | unsigned int val; /* Floating point status */ | 175 | unsigned int val; /* Floating point status */ |
@@ -173,6 +189,10 @@ struct thread_struct { | |||
173 | unsigned long vrsave; | 189 | unsigned long vrsave; |
174 | int used_vr; /* set if process has used altivec */ | 190 | int used_vr; /* set if process has used altivec */ |
175 | #endif /* CONFIG_ALTIVEC */ | 191 | #endif /* CONFIG_ALTIVEC */ |
192 | #ifdef CONFIG_VSX | ||
193 | /* VSR status */ | ||
193 | int used_vsr; /* set if process has used VSX */ | ||
195 | #endif /* CONFIG_VSX */ | ||
176 | #ifdef CONFIG_SPE | 196 | #ifdef CONFIG_SPE |
177 | unsigned long evr[32]; /* upper 32-bits of SPE regs */ | 197 | unsigned long evr[32]; /* upper 32-bits of SPE regs */ |
178 | u64 acc; /* Accumulator */ | 198 | u64 acc; /* Accumulator */ |
@@ -202,7 +222,7 @@ struct thread_struct { | |||
202 | .ksp_limit = INIT_SP_LIMIT, \ | 222 | .ksp_limit = INIT_SP_LIMIT, \ |
203 | .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ | 223 | .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ |
204 | .fs = KERNEL_DS, \ | 224 | .fs = KERNEL_DS, \ |
205 | .fpr = {0}, \ | 225 | .fpr = {{0}}, \ |
206 | .fpscr = { .val = 0, }, \ | 226 | .fpscr = { .val = 0, }, \ |
207 | .fpexc_mode = 0, \ | 227 | .fpexc_mode = 0, \ |
208 | } | 228 | } |
@@ -214,6 +234,8 @@ struct thread_struct { | |||
214 | #define thread_saved_pc(tsk) \ | 234 | #define thread_saved_pc(tsk) \ |
215 | ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) | 235 | ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) |
216 | 236 | ||
237 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs) | ||
238 | |||
217 | unsigned long get_wchan(struct task_struct *p); | 239 | unsigned long get_wchan(struct task_struct *p); |
218 | 240 | ||
219 | #define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) | 241 | #define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) |
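
The widened fpr[32][TS_FPRWIDTH] array and the TS_FPR() accessor above are easier to see in a small standalone sketch. This is not kernel code; it only mirrors the layout, assuming TS_FPRWIDTH is 2 on a CONFIG_VSX=y build (1 otherwise), to show that the classic FP value stays in column 0 while column 1 carries the low doubleword of the matching VSR, and why INIT_THREAD now needs the extra set of braces.

    /* Standalone sketch of the thread_struct FP/VSX storage layout.
     * TS_FPRWIDTH = 2 assumes a CONFIG_VSX=y build; the other macros
     * mirror the header above. */
    #include <stdio.h>

    #define TS_FPRWIDTH     2   /* doublewords stored per FP register */
    #define TS_FPROFFSET    0   /* column holding the classic FPR value */
    #define TS_VSRLOWOFFSET 1   /* column holding VSR doubleword 1 */
    #define TS_FPR(i)       fpr[i][TS_FPROFFSET]

    struct fp_state {
            double fpr[32][TS_FPRWIDTH];    /* FP and VSX 0-31 register set */
    };

    int main(void)
    {
            struct fp_state t = { .fpr = {{0}} };   /* extra braces, as in INIT_THREAD */

            t.TS_FPR(3) = 1.5;                  /* FPR3 = VSR3 doubleword 0 */
            t.fpr[3][TS_VSRLOWOFFSET] = 2.5;    /* VSR3 doubleword 1 */

            printf("FPR3=%g, VSR3 low half=%g\n",
                   t.TS_FPR(3), t.fpr[3][TS_VSRLOWOFFSET]);
            return 0;
    }
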
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 78b7b0d494c0..eb3bd2e1c7f6 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h | |||
@@ -212,8 +212,16 @@ extern u64 of_translate_dma_address(struct device_node *dev, | |||
212 | */ | 212 | */ |
213 | extern const u32 *of_get_address(struct device_node *dev, int index, | 213 | extern const u32 *of_get_address(struct device_node *dev, int index, |
214 | u64 *size, unsigned int *flags); | 214 | u64 *size, unsigned int *flags); |
215 | #ifdef CONFIG_PCI | ||
215 | extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no, | 216 | extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no, |
216 | u64 *size, unsigned int *flags); | 217 | u64 *size, unsigned int *flags); |
218 | #else | ||
219 | static inline const u32 *of_get_pci_address(struct device_node *dev, | ||
220 | int bar_no, u64 *size, unsigned int *flags) | ||
221 | { | ||
222 | return NULL; | ||
223 | } | ||
224 | #endif /* CONFIG_PCI */ | ||
217 | 225 | ||
218 | /* Get an address as a resource. Note that if your address is | 226 | /* Get an address as a resource. Note that if your address is |
219 | * a PIO address, the conversion will fail if the physical address | 227 | * a PIO address, the conversion will fail if the physical address |
@@ -223,8 +231,16 @@ extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no, | |||
223 | */ | 231 | */ |
224 | extern int of_address_to_resource(struct device_node *dev, int index, | 232 | extern int of_address_to_resource(struct device_node *dev, int index, |
225 | struct resource *r); | 233 | struct resource *r); |
234 | #ifdef CONFIG_PCI | ||
226 | extern int of_pci_address_to_resource(struct device_node *dev, int bar, | 235 | extern int of_pci_address_to_resource(struct device_node *dev, int bar, |
227 | struct resource *r); | 236 | struct resource *r); |
237 | #else | ||
238 | static inline int of_pci_address_to_resource(struct device_node *dev, int bar, | ||
239 | struct resource *r) | ||
240 | { | ||
241 | return -ENOSYS; | ||
242 | } | ||
243 | #endif /* CONFIG_PCI */ | ||
228 | 244 | ||
229 | /* Parse the ibm,dma-window property of an OF node into the busno, phys and | 245 | /* Parse the ibm,dma-window property of an OF node into the busno, phys and |
230 | * size parameters. | 246 | * size parameters. |
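
The CONFIG_PCI=n stubs above let shared platform code keep a single call site. A hedged sketch of one way a caller might use them, assuming a probe routine that tries the PCI interpretation of a node's addresses first and falls back to the plain OF parser; example_get_window() and the BAR index are illustrative, not kernel APIs.

    /* Hedged sketch (kernel context): resolve a node's first address
     * window, whether or not CONFIG_PCI is built in. With CONFIG_PCI=n
     * the stub above simply fails and the plain OF parser is used. */
    #include <linux/errno.h>
    #include <linux/ioport.h>
    #include <asm/prom.h>

    static int example_get_window(struct device_node *np, struct resource *res)
    {
            /* Try the PCI interpretation first (BAR 0 is illustrative). */
            if (of_pci_address_to_resource(np, 0, res) == 0)
                    return 0;

            /* !CONFIG_PCI stub or a non-PCI node: fall back to the
             * ordinary "reg" translation. */
            return of_address_to_resource(np, 0, res);
    }
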
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h index 39023dde1cc4..3d6e31024e56 100644 --- a/include/asm-powerpc/ptrace.h +++ b/include/asm-powerpc/ptrace.h | |||
@@ -119,6 +119,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, | |||
119 | #ifndef __powerpc64__ | 119 | #ifndef __powerpc64__ |
120 | #define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0) | 120 | #define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0) |
121 | #define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0) | 121 | #define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0) |
122 | #define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0) | ||
122 | #endif /* ! __powerpc64__ */ | 123 | #endif /* ! __powerpc64__ */ |
123 | #define TRAP(regs) ((regs)->trap & ~0xF) | 124 | #define TRAP(regs) ((regs)->trap & ~0xF) |
124 | #ifdef __powerpc64__ | 125 | #ifdef __powerpc64__ |
@@ -223,6 +224,14 @@ extern void user_disable_single_step(struct task_struct *); | |||
223 | #define PT_VRSAVE_32 (PT_VR0 + 33*4) | 224 | #define PT_VRSAVE_32 (PT_VR0 + 33*4) |
224 | #endif | 225 | #endif |
225 | 226 | ||
227 | /* | ||
228 | * Only store the first 32 VSRs here; the second 32 VSRs overlap VR0-31. | ||
229 | */ | ||
230 | #define PT_VSR0 150 /* each VSR reg occupies 2 slots in 64-bit */ | ||
231 | #define PT_VSR31 (PT_VSR0 + 2*31) | ||
232 | #ifdef __KERNEL__ | ||
233 | #define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */ | ||
234 | #endif | ||
226 | #endif /* __powerpc64__ */ | 235 | #endif /* __powerpc64__ */ |
227 | 236 | ||
228 | /* | 237 | /* |
@@ -245,6 +254,10 @@ extern void user_disable_single_step(struct task_struct *); | |||
245 | #define PTRACE_GETEVRREGS 20 | 254 | #define PTRACE_GETEVRREGS 20 |
246 | #define PTRACE_SETEVRREGS 21 | 255 | #define PTRACE_SETEVRREGS 21 |
247 | 256 | ||
257 | /* Get the first 32 128bit VSX registers */ | ||
258 | #define PTRACE_GETVSRREGS 27 | ||
259 | #define PTRACE_SETVSRREGS 28 | ||
260 | |||
248 | /* | 261 | /* |
249 | * Get or set a debug register. The first 16 are DABR registers and the | 262 | * Get or set a debug register. The first 16 are DABR registers and the |
250 | * second 16 are IABR registers. | 263 | * second 16 are IABR registers. |
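
PTRACE_GETVSRREGS above transfers only doubleword 1 of VSR0-31; doubleword 0 already travels with the FP registers and VSR32-63 with the VMX registers. A hedged userspace sketch of a tracer fetching that block; the 32 x 8-byte buffer layout is an assumption drawn from the comment above, and dump_vsr_low_halves() is an illustrative helper, not a documented interface.

    /* Hedged userspace sketch: read the low halves of VSR0-31 from a
     * stopped tracee. The 32 x 8-byte layout is an assumption based on
     * the header comment above. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETVSRREGS
    #define PTRACE_GETVSRREGS 27    /* mirrors the definition above */
    #endif

    static int dump_vsr_low_halves(pid_t pid)
    {
            uint64_t vsr_low[32];
            int i;

            if (ptrace(PTRACE_GETVSRREGS, pid, NULL, vsr_low) == -1) {
                    perror("PTRACE_GETVSRREGS");
                    return -1;
            }
            for (i = 0; i < 32; i++)
                    printf("VSR%-2d doubleword 1 = 0x%016llx\n",
                           i, (unsigned long long)vsr_low[i]);
            return 0;
    }
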
diff --git a/include/asm-powerpc/qe.h b/include/asm-powerpc/qe.h index c3be6e2e1490..edee15d269ea 100644 --- a/include/asm-powerpc/qe.h +++ b/include/asm-powerpc/qe.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #define _ASM_POWERPC_QE_H | 16 | #define _ASM_POWERPC_QE_H |
17 | #ifdef __KERNEL__ | 17 | #ifdef __KERNEL__ |
18 | 18 | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <asm/cpm.h> | ||
19 | #include <asm/immap_qe.h> | 21 | #include <asm/immap_qe.h> |
20 | 22 | ||
21 | #define QE_NUM_OF_SNUM 28 | 23 | #define QE_NUM_OF_SNUM 28 |
@@ -74,10 +76,38 @@ enum qe_clock { | |||
74 | QE_CLK_DUMMY | 76 | QE_CLK_DUMMY |
75 | }; | 77 | }; |
76 | 78 | ||
79 | static inline bool qe_clock_is_brg(enum qe_clock clk) | ||
80 | { | ||
81 | return clk >= QE_BRG1 && clk <= QE_BRG16; | ||
82 | } | ||
83 | |||
84 | extern spinlock_t cmxgcr_lock; | ||
85 | |||
77 | /* Export QE common operations */ | 86 | /* Export QE common operations */ |
78 | extern void qe_reset(void); | 87 | extern void __init qe_reset(void); |
88 | |||
89 | /* QE PIO */ | ||
90 | #define QE_PIO_PINS 32 | ||
91 | |||
92 | struct qe_pio_regs { | ||
93 | __be32 cpodr; /* Open drain register */ | ||
94 | __be32 cpdata; /* Data register */ | ||
95 | __be32 cpdir1; /* Direction register */ | ||
96 | __be32 cpdir2; /* Direction register */ | ||
97 | __be32 cppar1; /* Pin assignment register */ | ||
98 | __be32 cppar2; /* Pin assignment register */ | ||
99 | #ifdef CONFIG_PPC_85xx | ||
100 | u8 pad[8]; | ||
101 | #endif | ||
102 | }; | ||
103 | |||
79 | extern int par_io_init(struct device_node *np); | 104 | extern int par_io_init(struct device_node *np); |
80 | extern int par_io_of_config(struct device_node *np); | 105 | extern int par_io_of_config(struct device_node *np); |
106 | #define QE_PIO_DIR_IN 2 | ||
107 | #define QE_PIO_DIR_OUT 1 | ||
108 | extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, | ||
109 | int dir, int open_drain, int assignment, | ||
110 | int has_irq); | ||
81 | extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, | 111 | extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, |
82 | int assignment, int has_irq); | 112 | int assignment, int has_irq); |
83 | extern int par_io_data_set(u8 port, u8 pin, u8 val); | 113 | extern int par_io_data_set(u8 port, u8 pin, u8 val); |
@@ -89,20 +119,13 @@ unsigned int qe_get_brg_clk(void); | |||
89 | int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); | 119 | int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); |
90 | int qe_get_snum(void); | 120 | int qe_get_snum(void); |
91 | void qe_put_snum(u8 snum); | 121 | void qe_put_snum(u8 snum); |
92 | unsigned long qe_muram_alloc(int size, int align); | 122 | /* we actually use cpm_muram implementation, define this for convenience */ |
93 | int qe_muram_free(unsigned long offset); | 123 | #define qe_muram_init cpm_muram_init |
94 | unsigned long qe_muram_alloc_fixed(unsigned long offset, int size); | 124 | #define qe_muram_alloc cpm_muram_alloc |
95 | void qe_muram_dump(void); | 125 | #define qe_muram_alloc_fixed cpm_muram_alloc_fixed |
96 | 126 | #define qe_muram_free cpm_muram_free | |
97 | static inline void __iomem *qe_muram_addr(unsigned long offset) | 127 | #define qe_muram_addr cpm_muram_addr |
98 | { | 128 | #define qe_muram_offset cpm_muram_offset |
99 | return (void __iomem *)&qe_immr->muram[offset]; | ||
100 | } | ||
101 | |||
102 | static inline unsigned long qe_muram_offset(void __iomem *addr) | ||
103 | { | ||
104 | return addr - (void __iomem *)qe_immr->muram; | ||
105 | } | ||
106 | 129 | ||
107 | /* Structure that defines QE firmware binary files. | 130 | /* Structure that defines QE firmware binary files. |
108 | * | 131 | * |
@@ -156,6 +179,9 @@ int qe_upload_firmware(const struct qe_firmware *firmware); | |||
156 | /* Obtain information on the uploaded firmware */ | 179 | /* Obtain information on the uploaded firmware */ |
157 | struct qe_firmware_info *qe_get_firmware_info(void); | 180 | struct qe_firmware_info *qe_get_firmware_info(void); |
158 | 181 | ||
182 | /* QE USB */ | ||
183 | int qe_usb_clock_set(enum qe_clock clk, int rate); | ||
184 | |||
159 | /* Buffer descriptors */ | 185 | /* Buffer descriptors */ |
160 | struct qe_bd { | 186 | struct qe_bd { |
161 | __be16 status; | 187 | __be16 status; |
@@ -166,20 +192,6 @@ struct qe_bd { | |||
166 | #define BD_STATUS_MASK 0xffff0000 | 192 | #define BD_STATUS_MASK 0xffff0000 |
167 | #define BD_LENGTH_MASK 0x0000ffff | 193 | #define BD_LENGTH_MASK 0x0000ffff |
168 | 194 | ||
169 | #define BD_SC_EMPTY 0x8000 /* Receive is empty */ | ||
170 | #define BD_SC_READY 0x8000 /* Transmit is ready */ | ||
171 | #define BD_SC_WRAP 0x2000 /* Last buffer descriptor */ | ||
172 | #define BD_SC_INTRPT 0x1000 /* Interrupt on change */ | ||
173 | #define BD_SC_LAST 0x0800 /* Last buffer in frame */ | ||
174 | #define BD_SC_CM 0x0200 /* Continous mode */ | ||
175 | #define BD_SC_ID 0x0100 /* Rec'd too many idles */ | ||
176 | #define BD_SC_P 0x0100 /* xmt preamble */ | ||
177 | #define BD_SC_BR 0x0020 /* Break received */ | ||
178 | #define BD_SC_FR 0x0010 /* Framing error */ | ||
179 | #define BD_SC_PR 0x0008 /* Parity error */ | ||
180 | #define BD_SC_OV 0x0002 /* Overrun */ | ||
181 | #define BD_SC_CD 0x0001 /* ?? */ | ||
182 | |||
183 | /* Alignment */ | 195 | /* Alignment */ |
184 | #define QE_INTR_TABLE_ALIGN 16 /* ??? */ | 196 | #define QE_INTR_TABLE_ALIGN 16 /* ??? */ |
185 | #define QE_ALIGNMENT_OF_BD 8 | 197 | #define QE_ALIGNMENT_OF_BD 8 |
@@ -254,6 +266,16 @@ enum comm_dir { | |||
254 | #define QE_CMXGCR_MII_ENET_MNG 0x00007000 | 266 | #define QE_CMXGCR_MII_ENET_MNG 0x00007000 |
255 | #define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 | 267 | #define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 |
256 | #define QE_CMXGCR_USBCS 0x0000000f | 268 | #define QE_CMXGCR_USBCS 0x0000000f |
269 | #define QE_CMXGCR_USBCS_CLK3 0x1 | ||
270 | #define QE_CMXGCR_USBCS_CLK5 0x2 | ||
271 | #define QE_CMXGCR_USBCS_CLK7 0x3 | ||
272 | #define QE_CMXGCR_USBCS_CLK9 0x4 | ||
273 | #define QE_CMXGCR_USBCS_CLK13 0x5 | ||
274 | #define QE_CMXGCR_USBCS_CLK17 0x6 | ||
275 | #define QE_CMXGCR_USBCS_CLK19 0x7 | ||
276 | #define QE_CMXGCR_USBCS_CLK21 0x8 | ||
277 | #define QE_CMXGCR_USBCS_BRG9 0x9 | ||
278 | #define QE_CMXGCR_USBCS_BRG10 0xa | ||
257 | 279 | ||
258 | /* QE CECR Commands. | 280 | /* QE CECR Commands. |
259 | */ | 281 | */ |
@@ -283,7 +305,7 @@ enum comm_dir { | |||
283 | #define QE_HPAC_START_TX 0x0000060b | 305 | #define QE_HPAC_START_TX 0x0000060b |
284 | #define QE_HPAC_START_RX 0x0000070b | 306 | #define QE_HPAC_START_RX 0x0000070b |
285 | #define QE_USB_STOP_TX 0x0000000a | 307 | #define QE_USB_STOP_TX 0x0000000a |
286 | #define QE_USB_RESTART_TX 0x0000000b | 308 | #define QE_USB_RESTART_TX 0x0000000c |
287 | #define QE_QMC_STOP_TX 0x0000000c | 309 | #define QE_QMC_STOP_TX 0x0000000c |
288 | #define QE_QMC_STOP_RX 0x0000000d | 310 | #define QE_QMC_STOP_RX 0x0000000d |
289 | #define QE_SS7_SU_FIL_RESET 0x0000000e | 311 | #define QE_SS7_SU_FIL_RESET 0x0000000e |
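
The QE PIO declarations above (QE_PIO_DIR_IN/OUT, par_io_config_pin(), par_io_data_set()) are enough for simple pin setup. A hedged kernel-context sketch; the port, pin and assignment values are board-specific placeholders.

    /* Hedged sketch (kernel context): drive one QE port pin as a GPIO
     * output. Port 0, pin 3 and the assignment value are board-specific
     * placeholders; return values are ignored for brevity. */
    #include <asm/qe.h>

    static void example_setup_pin(void)
    {
            /* port 0, pin 3, output, not open-drain, assignment 0, no IRQ */
            par_io_config_pin(0, 3, QE_PIO_DIR_OUT, 0, 0, 0);
            par_io_data_set(0, 3, 1);       /* drive it high */
    }
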
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index edc0cfd7f6e2..bbccadfee0d6 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ | 30 | #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ |
31 | #define MSR_HV_LG 60 /* Hypervisor state */ | 31 | #define MSR_HV_LG 60 /* Hypervisor state */ |
32 | #define MSR_VEC_LG 25 /* Enable AltiVec */ | 32 | #define MSR_VEC_LG 25 /* Enable AltiVec */ |
33 | #define MSR_VSX_LG 23 /* Enable VSX */ | ||
33 | #define MSR_POW_LG 18 /* Enable Power Management */ | 34 | #define MSR_POW_LG 18 /* Enable Power Management */ |
34 | #define MSR_WE_LG 18 /* Wait State Enable */ | 35 | #define MSR_WE_LG 18 /* Wait State Enable */ |
35 | #define MSR_TGPR_LG 17 /* TLB Update registers in use */ | 36 | #define MSR_TGPR_LG 17 /* TLB Update registers in use */ |
@@ -71,6 +72,7 @@ | |||
71 | #endif | 72 | #endif |
72 | 73 | ||
73 | #define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */ | 74 | #define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */ |
75 | #define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */ | ||
74 | #define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */ | 76 | #define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */ |
75 | #define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */ | 77 | #define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */ |
76 | #define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */ | 78 | #define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */ |
@@ -240,7 +242,7 @@ | |||
240 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ | 242 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ |
241 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ | 243 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ |
242 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ | 244 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ |
243 | #define HID0_DFCA (1<<6) /* Data Cache Flush Assist */ | 245 | #define HID0_DCFA (1<<6) /* Data Cache Flush Assist */ |
244 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ | 246 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ |
245 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ | 247 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ |
246 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ | 248 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ |
@@ -732,6 +734,8 @@ | |||
732 | " .llong %1\n" \ | 734 | " .llong %1\n" \ |
733 | " .llong 97b-98b\n" \ | 735 | " .llong 97b-98b\n" \ |
734 | " .llong 99b-98b\n" \ | 736 | " .llong 99b-98b\n" \ |
737 | " .llong 0\n" \ | ||
738 | " .llong 0\n" \ | ||
735 | ".previous" \ | 739 | ".previous" \ |
736 | : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) | 740 | : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) |
737 | #else | 741 | #else |
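
MSR_VSX follows the same pattern as the other MSR masks: the bit number MSR_VSX_LG (23) is turned into a mask with __MASK(). A standalone sketch of that arithmetic, with __MASK() defined locally as 1UL << bit (an assumption mirroring the usual kernel definition) and a made-up MSR image.

    /* Standalone sketch of the MSR bit/mask arithmetic. __MASK() is
     * defined locally as 1UL << bit, mirroring the usual kernel
     * definition; the msr value is a made-up example image. */
    #include <stdio.h>

    #define __MASK(X)       (1UL << (X))
    #define MSR_VEC_LG      25
    #define MSR_VSX_LG      23
    #define MSR_VEC         __MASK(MSR_VEC_LG)
    #define MSR_VSX         __MASK(MSR_VSX_LG)

    int main(void)
    {
            unsigned long msr = MSR_VSX;    /* pretend-saved MSR, VSX enabled */

            printf("MSR_VSX = 0x%08lx\n", MSR_VSX);     /* 0x00800000 */
            printf("VSX %s, AltiVec %s\n",
                   (msr & MSR_VSX) ? "on" : "off",
                   (msr & MSR_VEC) ? "on" : "off");
            return 0;
    }
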
diff --git a/include/asm-powerpc/reg_booke.h b/include/asm-powerpc/reg_booke.h index cf54a3f31753..be980f4ee495 100644 --- a/include/asm-powerpc/reg_booke.h +++ b/include/asm-powerpc/reg_booke.h | |||
@@ -61,6 +61,8 @@ | |||
61 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ | 61 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ |
62 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ | 62 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ |
63 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ | 63 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ |
64 | #define SPRN_L1CFG0 0x203 /* L1 Cache Configure Register 0 */ | ||
65 | #define SPRN_L1CFG1 0x204 /* L1 Cache Configure Register 1 */ | ||
64 | #define SPRN_ATB 0x20E /* Alternate Time Base */ | 66 | #define SPRN_ATB 0x20E /* Alternate Time Base */ |
65 | #define SPRN_ATBL 0x20E /* Alternate Time Base Lower */ | 67 | #define SPRN_ATBL 0x20E /* Alternate Time Base Lower */ |
66 | #define SPRN_ATBU 0x20F /* Alternate Time Base Upper */ | 68 | #define SPRN_ATBU 0x20F /* Alternate Time Base Upper */ |
@@ -78,6 +80,7 @@ | |||
78 | #define SPRN_DSRR1 0x23F /* Debug Save and Restore Register 1 */ | 80 | #define SPRN_DSRR1 0x23F /* Debug Save and Restore Register 1 */ |
79 | #define SPRN_SPRG8 0x25C /* Special Purpose Register General 8 */ | 81 | #define SPRN_SPRG8 0x25C /* Special Purpose Register General 8 */ |
80 | #define SPRN_SPRG9 0x25D /* Special Purpose Register General 9 */ | 82 | #define SPRN_SPRG9 0x25D /* Special Purpose Register General 9 */ |
83 | #define SPRN_L1CSR2 0x25E /* L1 Cache Control and Status Register 2 */ | ||
81 | #define SPRN_MAS0 0x270 /* MMU Assist Register 0 */ | 84 | #define SPRN_MAS0 0x270 /* MMU Assist Register 0 */ |
82 | #define SPRN_MAS1 0x271 /* MMU Assist Register 1 */ | 85 | #define SPRN_MAS1 0x271 /* MMU Assist Register 1 */ |
83 | #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ | 86 | #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ |
@@ -108,6 +111,8 @@ | |||
108 | #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ | 111 | #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ |
109 | #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ | 112 | #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ |
110 | #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ | 113 | #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ |
114 | #define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */ | ||
115 | #define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ | ||
111 | #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ | 116 | #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ |
112 | #define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ | 117 | #define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ |
113 | #define SPRN_SVR 0x3FF /* System Version Register */ | 118 | #define SPRN_SVR 0x3FF /* System Version Register */ |
@@ -210,6 +215,7 @@ | |||
210 | #ifdef CONFIG_BOOKE | 215 | #ifdef CONFIG_BOOKE |
211 | #define DBSR_IC 0x08000000 /* Instruction Completion */ | 216 | #define DBSR_IC 0x08000000 /* Instruction Completion */ |
212 | #define DBSR_BT 0x04000000 /* Branch Taken */ | 217 | #define DBSR_BT 0x04000000 /* Branch Taken */ |
218 | #define DBSR_IRPT 0x02000000 /* Exception Debug Event */ | ||
213 | #define DBSR_TIE 0x01000000 /* Trap Instruction Event */ | 219 | #define DBSR_TIE 0x01000000 /* Trap Instruction Event */ |
214 | #define DBSR_IAC1 0x00800000 /* Instr Address Compare 1 Event */ | 220 | #define DBSR_IAC1 0x00800000 /* Instr Address Compare 1 Event */ |
215 | #define DBSR_IAC2 0x00400000 /* Instr Address Compare 2 Event */ | 221 | #define DBSR_IAC2 0x00400000 /* Instr Address Compare 2 Event */ |
@@ -219,10 +225,14 @@ | |||
219 | #define DBSR_DAC1W 0x00040000 /* Data Addr Compare 1 Write Event */ | 225 | #define DBSR_DAC1W 0x00040000 /* Data Addr Compare 1 Write Event */ |
220 | #define DBSR_DAC2R 0x00020000 /* Data Addr Compare 2 Read Event */ | 226 | #define DBSR_DAC2R 0x00020000 /* Data Addr Compare 2 Read Event */ |
221 | #define DBSR_DAC2W 0x00010000 /* Data Addr Compare 2 Write Event */ | 227 | #define DBSR_DAC2W 0x00010000 /* Data Addr Compare 2 Write Event */ |
228 | #define DBSR_RET 0x00008000 /* Return Debug Event */ | ||
229 | #define DBSR_CIRPT 0x00000040 /* Critical Interrupt Taken Event */ | ||
230 | #define DBSR_CRET 0x00000020 /* Critical Return Debug Event */ | ||
222 | #endif | 231 | #endif |
223 | #ifdef CONFIG_40x | 232 | #ifdef CONFIG_40x |
224 | #define DBSR_IC 0x80000000 /* Instruction Completion */ | 233 | #define DBSR_IC 0x80000000 /* Instruction Completion */ |
225 | #define DBSR_BT 0x40000000 /* Branch taken */ | 234 | #define DBSR_BT 0x40000000 /* Branch taken */ |
235 | #define DBSR_IRPT 0x20000000 /* Exception Debug Event */ | ||
226 | #define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ | 236 | #define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ |
227 | #define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */ | 237 | #define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */ |
228 | #define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */ | 238 | #define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */ |
@@ -253,6 +263,7 @@ | |||
253 | #define ESR_BO 0x00020000 /* Byte Ordering */ | 263 | #define ESR_BO 0x00020000 /* Byte Ordering */ |
254 | 264 | ||
255 | /* Bit definitions related to the DBCR0. */ | 265 | /* Bit definitions related to the DBCR0. */ |
266 | #if defined(CONFIG_40x) | ||
256 | #define DBCR0_EDM 0x80000000 /* External Debug Mode */ | 267 | #define DBCR0_EDM 0x80000000 /* External Debug Mode */ |
257 | #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ | 268 | #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ |
258 | #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ | 269 | #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ |
@@ -261,20 +272,69 @@ | |||
261 | #define DBCR0_RST_CORE 0x10000000 /* Core Reset */ | 272 | #define DBCR0_RST_CORE 0x10000000 /* Core Reset */ |
262 | #define DBCR0_RST_NONE 0x00000000 /* No Reset */ | 273 | #define DBCR0_RST_NONE 0x00000000 /* No Reset */ |
263 | #define DBCR0_IC 0x08000000 /* Instruction Completion */ | 274 | #define DBCR0_IC 0x08000000 /* Instruction Completion */ |
275 | #define DBCR0_ICMP DBCR0_IC | ||
264 | #define DBCR0_BT 0x04000000 /* Branch Taken */ | 276 | #define DBCR0_BT 0x04000000 /* Branch Taken */ |
277 | #define DBCR0_BRT DBCR0_BT | ||
265 | #define DBCR0_EDE 0x02000000 /* Exception Debug Event */ | 278 | #define DBCR0_EDE 0x02000000 /* Exception Debug Event */ |
279 | #define DBCR0_IRPT DBCR0_EDE | ||
266 | #define DBCR0_TDE 0x01000000 /* TRAP Debug Event */ | 280 | #define DBCR0_TDE 0x01000000 /* TRAP Debug Event */ |
267 | #define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */ | 281 | #define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */ |
282 | #define DBCR0_IAC1 DBCR0_IA1 | ||
268 | #define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */ | 283 | #define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */ |
284 | #define DBCR0_IAC2 DBCR0_IA2 | ||
269 | #define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */ | 285 | #define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */ |
270 | #define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */ | 286 | #define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */ |
271 | #define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */ | 287 | #define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */ |
288 | #define DBCR0_IAC3 DBCR0_IA3 | ||
272 | #define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */ | 289 | #define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */ |
290 | #define DBCR0_IAC4 DBCR0_IA4 | ||
273 | #define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */ | 291 | #define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */ |
274 | #define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */ | 292 | #define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */ |
275 | #define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ | 293 | #define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ |
276 | #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ | 294 | #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ |
277 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ | 295 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ |
296 | #elif defined(CONFIG_BOOKE) | ||
297 | #define DBCR0_EDM 0x80000000 /* External Debug Mode */ | ||
298 | #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ | ||
299 | #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ | ||
300 | /* DBCR0_RST_* encodings are 44x-specific and not used on Freescale BookE */ | ||
301 | #define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */ | ||
302 | #define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */ | ||
303 | #define DBCR0_RST_CORE 0x10000000 /* Core Reset */ | ||
304 | #define DBCR0_RST_NONE 0x00000000 /* No Reset */ | ||
305 | #define DBCR0_ICMP 0x08000000 /* Instruction Completion */ | ||
306 | #define DBCR0_IC DBCR0_ICMP | ||
307 | #define DBCR0_BRT 0x04000000 /* Branch Taken */ | ||
308 | #define DBCR0_BT DBCR0_BRT | ||
309 | #define DBCR0_IRPT 0x02000000 /* Exception Debug Event */ | ||
310 | #define DBCR0_TDE 0x01000000 /* TRAP Debug Event */ | ||
311 | #define DBCR0_TIE DBCR0_TDE | ||
312 | #define DBCR0_IAC1 0x00800000 /* Instr Addr compare 1 enable */ | ||
313 | #define DBCR0_IAC2 0x00400000 /* Instr Addr compare 2 enable */ | ||
314 | #define DBCR0_IAC3 0x00200000 /* Instr Addr compare 3 enable */ | ||
315 | #define DBCR0_IAC4 0x00100000 /* Instr Addr compare 4 enable */ | ||
316 | #define DBCR0_DAC1R 0x00080000 /* DAC 1 Read enable */ | ||
317 | #define DBCR0_DAC1W 0x00040000 /* DAC 1 Write enable */ | ||
318 | #define DBCR0_DAC2R 0x00020000 /* DAC 2 Read enable */ | ||
319 | #define DBCR0_DAC2W 0x00010000 /* DAC 2 Write enable */ | ||
320 | #define DBCR0_RET 0x00008000 /* Return Debug Event */ | ||
321 | #define DBCR0_CIRPT 0x00000040 /* Critical Interrupt Taken Event */ | ||
322 | #define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ | ||
323 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ | ||
324 | |||
325 | /* Bit definitions related to the DBCR1. */ | ||
326 | #define DBCR1_IAC12M 0x00800000 /* Instr Addr 1-2 range enable */ | ||
327 | #define DBCR1_IAC12MX 0x00C00000 /* Instr Addr 1-2 range eXclusive */ | ||
328 | #define DBCR1_IAC12AT 0x00010000 /* Instr Addr 1-2 range Toggle */ | ||
329 | #define DBCR1_IAC34M 0x00000080 /* Instr Addr 3-4 range enable */ | ||
330 | #define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ | ||
331 | #define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ | ||
332 | |||
333 | /* Bit definitions related to the DBCR2. */ | ||
334 | #define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */ | ||
335 | #define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */ | ||
336 | #define DBCR2_DAC12A 0x00200000 /* DAC 1-2 Asynchronous */ | ||
337 | #endif | ||
278 | 338 | ||
279 | /* Bit definitions related to the TCR. */ | 339 | /* Bit definitions related to the TCR. */ |
280 | #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ | 340 | #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ |
@@ -336,6 +396,20 @@ | |||
336 | #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ | 396 | #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ |
337 | #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ | 397 | #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ |
338 | 398 | ||
399 | /* Bit definitions for L2CSR0. */ | ||
400 | #define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */ | ||
401 | #define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity/ECC Enable */ | ||
402 | #define L2CSR0_L2WP 0x1c000000 /* L2 I/D Way Partitioning */ | ||
403 | #define L2CSR0_L2CM 0x03000000 /* L2 Cache Coherency Mode */ | ||
404 | #define L2CSR0_L2FI 0x00200000 /* L2 Cache Flash Invalidate */ | ||
405 | #define L2CSR0_L2IO 0x00100000 /* L2 Cache Instruction Only */ | ||
406 | #define L2CSR0_L2DO 0x00010000 /* L2 Cache Data Only */ | ||
407 | #define L2CSR0_L2REP 0x00003000 /* L2 Line Replacement Algo */ | ||
408 | #define L2CSR0_L2FL 0x00000800 /* L2 Cache Flush */ | ||
409 | #define L2CSR0_L2LFC 0x00000400 /* L2 Cache Lock Flash Clear */ | ||
410 | #define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */ | ||
411 | #define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */ | ||
412 | |||
339 | /* Bit definitions for SGR. */ | 413 | /* Bit definitions for SGR. */ |
340 | #define SGR_NORMAL 0 /* Speculative fetching allowed. */ | 414 | #define SGR_NORMAL 0 /* Speculative fetching allowed. */ |
341 | #define SGR_GUARDED 1 /* Speculative fetching disallowed. */ | 415 | #define SGR_GUARDED 1 /* Speculative fetching disallowed. */ |
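
The new BookE DBCR0 block keeps the 40x spellings as aliases (DBCR0_IC/DBCR0_ICMP, DBCR0_BT/DBCR0_BRT and so on), so shared debug code can use either name. A standalone sketch of the bit arithmetic for arming an instruction address compare and decoding the matching DBSR event; the mtspr/mfspr accesses a real handler would perform are left out.

    /* Standalone sketch of the DBCR0/DBSR bit arithmetic for a BookE
     * instruction address compare; the SPR writes a real handler would
     * do (SPRN_DBCR0, SPRN_IAC1, SPRN_DBSR) are omitted. */
    #include <stdio.h>

    #define DBCR0_IDM       0x40000000      /* Internal Debug Mode */
    #define DBCR0_IAC1      0x00800000      /* Instr Addr compare 1 enable */
    #define DBSR_IAC1       0x00800000      /* Instr Address Compare 1 Event */

    int main(void)
    {
            unsigned long dbcr0 = 0, dbsr;

            /* Arm IAC1 under internal debug mode. */
            dbcr0 |= DBCR0_IDM | DBCR0_IAC1;
            printf("DBCR0 image: 0x%08lx\n", dbcr0);

            /* Pretend the debug interrupt handler read this DBSR value;
             * DBSR bits are write-1-to-clear once handled. */
            dbsr = DBSR_IAC1;
            if (dbsr & DBSR_IAC1)
                    printf("IAC1 hit\n");
            return 0;
    }
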
diff --git a/include/asm-powerpc/sigcontext.h b/include/asm-powerpc/sigcontext.h index 165d630e1cf3..9c1f24fd5d11 100644 --- a/include/asm-powerpc/sigcontext.h +++ b/include/asm-powerpc/sigcontext.h | |||
@@ -43,9 +43,44 @@ struct sigcontext { | |||
43 | * it must be copied via a vector register to/from storage) or as a word. | 43 | * it must be copied via a vector register to/from storage) or as a word. |
44 | * The entry with index 33 contains the vrsave as the first word (offset 0) | 44 | * The entry with index 33 contains the vrsave as the first word (offset 0) |
45 | * within the quadword. | 45 | * within the quadword. |
46 | * | ||
47 | * Part of the VSX data is also stored here, by extending vmx_reserve | ||
48 | * by an additional 32 double words. Architecturally the layout of | ||
49 | * the VSR registers and how they overlap on top of the legacy FPR and | ||
50 | * VR registers is shown below: | ||
51 | * | ||
52 | * VSR doubleword 0 VSR doubleword 1 | ||
53 | * ---------------------------------------------------------------- | ||
54 | * VSR[0] | FPR[0] | | | ||
55 | * ---------------------------------------------------------------- | ||
56 | * VSR[1] | FPR[1] | | | ||
57 | * ---------------------------------------------------------------- | ||
58 | * | ... | | | ||
59 | * | ... | | | ||
60 | * ---------------------------------------------------------------- | ||
61 | * VSR[30] | FPR[30] | | | ||
62 | * ---------------------------------------------------------------- | ||
63 | * VSR[31] | FPR[31] | | | ||
64 | * ---------------------------------------------------------------- | ||
65 | * VSR[32] | VR[0] | | ||
66 | * ---------------------------------------------------------------- | ||
67 | * VSR[33] | VR[1] | | ||
68 | * ---------------------------------------------------------------- | ||
69 | * | ... | | ||
70 | * | ... | | ||
71 | * ---------------------------------------------------------------- | ||
72 | * VSR[62] | VR[30] | | ||
73 | * ---------------------------------------------------------------- | ||
74 | * VSR[63] | VR[31] | | ||
75 | * ---------------------------------------------------------------- | ||
76 | * | ||
77 | * FPR/VSR 0-31 doubleword 0 is stored in fp_regs, and VMX/VSR 32-63 | ||
78 | * is stored at the start of vmx_reserve. vmx_reserve is extended for | ||
79 | * backwards compatibility to store VSR 0-31 doubleword 1 after the VMX | ||
80 | * registers and vscr/vrsave. | ||
46 | */ | 81 | */ |
47 | elf_vrreg_t __user *v_regs; | 82 | elf_vrreg_t __user *v_regs; |
48 | long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1]; | 83 | long vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1]; |
49 | #endif | 84 | #endif |
50 | }; | 85 | }; |
51 | 86 | ||
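
With the layout documented above, the full 128-bit VSR[i] for i < 32 is split across two places in the 64-bit signal frame: doubleword 0 in the FP save area and doubleword 1 in the tail of vmx_reserve. A hedged userspace sketch of reassembling one register; the 34-quadword offset past v_regs (32 VRs, then vscr, then vrsave) is an assumption about how the kernel fills the frame, not something this header alone guarantees.

    /* Hedged userspace sketch (powerpc64 only): pull both halves of
     * VSR[i], i < 32, out of a signal frame laid out as described
     * above. The 34-quadword offset past v_regs is an assumption. */
    #include <stdint.h>
    #include <string.h>
    #include <asm/sigcontext.h>

    struct vsr128 {
            uint64_t dw0;   /* FPR[i], i.e. VSR[i] doubleword 0 */
            uint64_t dw1;   /* VSR[i] doubleword 1 */
    };

    static struct vsr128 get_vsr(const struct sigcontext *sc, int i)
    {
            /* assumed start of the doubleword-1 block: 32 VRs + vscr +
             * vrsave quadwords past v_regs, one doubleword per VSR */
            const unsigned char *dw1_block =
                    (const unsigned char *)sc->v_regs + 34 * 16;
            struct vsr128 v;

            memcpy(&v.dw0, &sc->fp_regs[i], 8);     /* FP save area */
            memcpy(&v.dw1, dw1_block + 8 * i, 8);   /* vmx_reserve tail */
            return v;
    }
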
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 505f35bacaa9..1cd43e3d94fb 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h | |||
@@ -37,6 +37,8 @@ extern void cpu_die(void); | |||
37 | extern void smp_send_debugger_break(int cpu); | 37 | extern void smp_send_debugger_break(int cpu); |
38 | extern void smp_message_recv(int); | 38 | extern void smp_message_recv(int); |
39 | 39 | ||
40 | DECLARE_PER_CPU(unsigned int, pvr); | ||
41 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | 42 | #ifdef CONFIG_HOTPLUG_CPU |
41 | extern void fixup_irqs(cpumask_t map); | 43 | extern void fixup_irqs(cpumask_t map); |
42 | int generic_cpu_disable(void); | 44 | int generic_cpu_disable(void); |
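
The per-CPU pvr variable declared above can be read with the ordinary per-CPU accessors. A hedged kernel-context sketch that prints the snapshot for each online CPU; example_print_pvrs() is an illustrative name.

    /* Hedged sketch (kernel context): read the per-CPU PVR snapshot
     * declared above for every online CPU. */
    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <asm/smp.h>

    static void example_print_pvrs(void)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    printk(KERN_INFO "CPU%d PVR: 0x%08x\n",
                           cpu, per_cpu(pvr, cpu));
    }
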
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h index 9aea8e9f0bd1..54a47ea2c3aa 100644 --- a/include/asm-powerpc/sparsemem.h +++ b/include/asm-powerpc/sparsemem.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #define MAX_PHYSADDR_BITS 44 | 13 | #define MAX_PHYSADDR_BITS 44 |
14 | #define MAX_PHYSMEM_BITS 44 | 14 | #define MAX_PHYSMEM_BITS 44 |
15 | 15 | ||
16 | #endif /* CONFIG_SPARSEMEM */ | ||
17 | |||
16 | #ifdef CONFIG_MEMORY_HOTPLUG | 18 | #ifdef CONFIG_MEMORY_HOTPLUG |
17 | extern void create_section_mapping(unsigned long start, unsigned long end); | 19 | extern void create_section_mapping(unsigned long start, unsigned long end); |
18 | extern int remove_section_mapping(unsigned long start, unsigned long end); | 20 | extern int remove_section_mapping(unsigned long start, unsigned long end); |
@@ -26,7 +28,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr) | |||
26 | #endif /* CONFIG_NUMA */ | 28 | #endif /* CONFIG_NUMA */ |
27 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 29 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
28 | 30 | ||
29 | #endif /* CONFIG_SPARSEMEM */ | ||
30 | |||
31 | #endif /* __KERNEL__ */ | 31 | #endif /* __KERNEL__ */ |
32 | #endif /* _ASM_POWERPC_SPARSEMEM_H */ | 32 | #endif /* _ASM_POWERPC_SPARSEMEM_H */ |
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h index 258c93993190..f56a843f4705 100644 --- a/include/asm-powerpc/spinlock.h +++ b/include/asm-powerpc/spinlock.h | |||
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long __spin_trylock(raw_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -73,7 +73,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) | |||
73 | return tmp; | 73 | return tmp; |
74 | } | 74 | } |
75 | 75 | ||
76 | static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return __spin_trylock(lock) == 0; | 79 | return __spin_trylock(lock) == 0; |
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock); | |||
104 | #define SHARED_PROCESSOR 0 | 104 | #define SHARED_PROCESSOR 0 |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) | 107 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
@@ -119,7 +119,8 @@ static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 122 | static inline |
123 | void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
123 | { | 124 | { |
124 | unsigned long flags_dis; | 125 | unsigned long flags_dis; |
125 | 126 | ||
@@ -139,7 +140,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long | |||
139 | } | 140 | } |
140 | } | 141 | } |
141 | 142 | ||
142 | static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | 143 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
143 | { | 144 | { |
144 | SYNC_IO; | 145 | SYNC_IO; |
145 | __asm__ __volatile__("# __raw_spin_unlock\n\t" | 146 | __asm__ __volatile__("# __raw_spin_unlock\n\t" |
@@ -180,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
180 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
181 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
182 | */ | 183 | */ |
183 | static long __inline__ __read_trylock(raw_rwlock_t *rw) | 184 | static inline long __read_trylock(raw_rwlock_t *rw) |
184 | { | 185 | { |
185 | long tmp; | 186 | long tmp; |
186 | 187 | ||
@@ -204,7 +205,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw) | |||
204 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
205 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
206 | */ | 207 | */ |
207 | static __inline__ long __write_trylock(raw_rwlock_t *rw) | 208 | static inline long __write_trylock(raw_rwlock_t *rw) |
208 | { | 209 | { |
209 | long tmp, token; | 210 | long tmp, token; |
210 | 211 | ||
@@ -224,7 +225,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw) | |||
224 | return tmp; | 225 | return tmp; |
225 | } | 226 | } |
226 | 227 | ||
227 | static void __inline__ __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
228 | { | 229 | { |
229 | while (1) { | 230 | while (1) { |
230 | if (likely(__read_trylock(rw) > 0)) | 231 | if (likely(__read_trylock(rw) > 0)) |
@@ -238,7 +239,7 @@ static void __inline__ __raw_read_lock(raw_rwlock_t *rw) | |||
238 | } | 239 | } |
239 | } | 240 | } |
240 | 241 | ||
241 | static void __inline__ __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
242 | { | 243 | { |
243 | while (1) { | 244 | while (1) { |
244 | if (likely(__write_trylock(rw) == 0)) | 245 | if (likely(__write_trylock(rw) == 0)) |
@@ -252,17 +253,17 @@ static void __inline__ __raw_write_lock(raw_rwlock_t *rw) | |||
252 | } | 253 | } |
253 | } | 254 | } |
254 | 255 | ||
255 | static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
256 | { | 257 | { |
257 | return __read_trylock(rw) > 0; | 258 | return __read_trylock(rw) > 0; |
258 | } | 259 | } |
259 | 260 | ||
260 | static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
261 | { | 262 | { |
262 | return __write_trylock(rw) == 0; | 263 | return __write_trylock(rw) == 0; |
263 | } | 264 | } |
264 | 265 | ||
265 | static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
266 | { | 267 | { |
267 | long tmp; | 268 | long tmp; |
268 | 269 | ||
@@ -279,7 +280,7 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | |||
279 | : "cr0", "memory"); | 280 | : "cr0", "memory"); |
280 | } | 281 | } |
281 | 282 | ||
282 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | 283 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
283 | { | 284 | { |
284 | __asm__ __volatile__("# write_unlock\n\t" | 285 | __asm__ __volatile__("# write_unlock\n\t" |
285 | LWSYNC_ON_SMP: : :"memory"); | 286 | LWSYNC_ON_SMP: : :"memory"); |
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index 99348c1f4cab..8b2eb044270a 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h | |||
@@ -191,6 +191,7 @@ struct cbe_spu_info { | |||
191 | struct list_head spus; | 191 | struct list_head spus; |
192 | int n_spus; | 192 | int n_spus; |
193 | int nr_active; | 193 | int nr_active; |
194 | atomic_t busy_spus; | ||
194 | atomic_t reserved_spus; | 195 | atomic_t reserved_spus; |
195 | }; | 196 | }; |
196 | 197 | ||
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h index 2cda3c38a9fa..45963e80f557 100644 --- a/include/asm-powerpc/synch.h +++ b/include/asm-powerpc/synch.h | |||
@@ -3,34 +3,42 @@ | |||
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/stringify.h> | 5 | #include <linux/stringify.h> |
6 | #include <asm/feature-fixups.h> | ||
6 | 7 | ||
7 | #ifdef __powerpc64__ | 8 | #ifndef __ASSEMBLY__ |
8 | #define __SUBARCH_HAS_LWSYNC | 9 | extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; |
9 | #endif | 10 | extern void do_lwsync_fixups(unsigned long value, void *fixup_start, |
11 | void *fixup_end); | ||
12 | |||
13 | static inline void eieio(void) | ||
14 | { | ||
15 | __asm__ __volatile__ ("eieio" : : : "memory"); | ||
16 | } | ||
17 | |||
18 | static inline void isync(void) | ||
19 | { | ||
20 | __asm__ __volatile__ ("isync" : : : "memory"); | ||
21 | } | ||
22 | #endif /* __ASSEMBLY__ */ | ||
10 | 23 | ||
11 | #ifdef __SUBARCH_HAS_LWSYNC | 24 | #if defined(__powerpc64__) |
12 | # define LWSYNC lwsync | 25 | # define LWSYNC lwsync |
26 | #elif defined(CONFIG_E500) | ||
27 | # define LWSYNC \ | ||
28 | START_LWSYNC_SECTION(96); \ | ||
29 | sync; \ | ||
30 | MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup); | ||
13 | #else | 31 | #else |
14 | # define LWSYNC sync | 32 | # define LWSYNC sync |
15 | #endif | 33 | #endif |
16 | 34 | ||
17 | #ifdef CONFIG_SMP | 35 | #ifdef CONFIG_SMP |
18 | #define ISYNC_ON_SMP "\n\tisync\n" | 36 | #define ISYNC_ON_SMP "\n\tisync\n" |
19 | #define LWSYNC_ON_SMP __stringify(LWSYNC) "\n" | 37 | #define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n" |
20 | #else | 38 | #else |
21 | #define ISYNC_ON_SMP | 39 | #define ISYNC_ON_SMP |
22 | #define LWSYNC_ON_SMP | 40 | #define LWSYNC_ON_SMP |
23 | #endif | 41 | #endif |
24 | 42 | ||
25 | static inline void eieio(void) | ||
26 | { | ||
27 | __asm__ __volatile__ ("eieio" : : : "memory"); | ||
28 | } | ||
29 | |||
30 | static inline void isync(void) | ||
31 | { | ||
32 | __asm__ __volatile__ ("isync" : : : "memory"); | ||
33 | } | ||
34 | |||
35 | #endif /* __KERNEL__ */ | 43 | #endif /* __KERNEL__ */ |
36 | #endif /* _ASM_POWERPC_SYNCH_H */ | 44 | #endif /* _ASM_POWERPC_SYNCH_H */ |
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 5235f875b932..e6e25e2364eb 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h | |||
@@ -30,8 +30,8 @@ | |||
30 | * | 30 | * |
31 | * For wmb(), we use sync since wmb is used in drivers to order | 31 | * For wmb(), we use sync since wmb is used in drivers to order |
32 | * stores to system memory with respect to writes to the device. | 32 | * stores to system memory with respect to writes to the device. |
33 | * However, smp_wmb() can be a lighter-weight eieio barrier on | 33 | * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier |
34 | * SMP since it is only used to order updates to system memory. | 34 | * on SMP since it is only used to order updates to system memory. |
35 | */ | 35 | */ |
36 | #define mb() __asm__ __volatile__ ("sync" : : : "memory") | 36 | #define mb() __asm__ __volatile__ ("sync" : : : "memory") |
37 | #define rmb() __asm__ __volatile__ ("sync" : : : "memory") | 37 | #define rmb() __asm__ __volatile__ ("sync" : : : "memory") |
@@ -43,9 +43,16 @@ | |||
43 | #ifdef __KERNEL__ | 43 | #ifdef __KERNEL__ |
44 | #define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ | 44 | #define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ |
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
46 | |||
47 | #ifdef __SUBARCH_HAS_LWSYNC | ||
48 | # define SMPWMB lwsync | ||
49 | #else | ||
50 | # define SMPWMB eieio | ||
51 | #endif | ||
52 | |||
46 | #define smp_mb() mb() | 53 | #define smp_mb() mb() |
47 | #define smp_rmb() rmb() | 54 | #define smp_rmb() rmb() |
48 | #define smp_wmb() eieio() | 55 | #define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory") |
49 | #define smp_read_barrier_depends() read_barrier_depends() | 56 | #define smp_read_barrier_depends() read_barrier_depends() |
50 | #else | 57 | #else |
51 | #define smp_mb() barrier() | 58 | #define smp_mb() barrier() |
@@ -132,6 +139,8 @@ extern void enable_kernel_altivec(void); | |||
132 | extern void giveup_altivec(struct task_struct *); | 139 | extern void giveup_altivec(struct task_struct *); |
133 | extern void load_up_altivec(struct task_struct *); | 140 | extern void load_up_altivec(struct task_struct *); |
134 | extern int emulate_altivec(struct pt_regs *); | 141 | extern int emulate_altivec(struct pt_regs *); |
142 | extern void __giveup_vsx(struct task_struct *); | ||
143 | extern void giveup_vsx(struct task_struct *); | ||
135 | extern void enable_kernel_spe(void); | 144 | extern void enable_kernel_spe(void); |
136 | extern void giveup_spe(struct task_struct *); | 145 | extern void giveup_spe(struct task_struct *); |
137 | extern void load_up_spe(struct task_struct *); | 146 | extern void load_up_spe(struct task_struct *); |
@@ -155,6 +164,14 @@ static inline void flush_altivec_to_thread(struct task_struct *t) | |||
155 | } | 164 | } |
156 | #endif | 165 | #endif |
157 | 166 | ||
167 | #ifdef CONFIG_VSX | ||
168 | extern void flush_vsx_to_thread(struct task_struct *); | ||
169 | #else | ||
170 | static inline void flush_vsx_to_thread(struct task_struct *t) | ||
171 | { | ||
172 | } | ||
173 | #endif | ||
174 | |||
158 | #ifdef CONFIG_SPE | 175 | #ifdef CONFIG_SPE |
159 | extern void flush_spe_to_thread(struct task_struct *); | 176 | extern void flush_spe_to_thread(struct task_struct *); |
160 | #else | 177 | #else |
@@ -190,6 +207,7 @@ extern struct task_struct *_switch(struct thread_struct *prev, | |||
190 | 207 | ||
191 | extern unsigned int rtas_data; | 208 | extern unsigned int rtas_data; |
192 | extern int mem_init_done; /* set on boot once kmalloc can be called */ | 209 | extern int mem_init_done; /* set on boot once kmalloc can be called */ |
210 | extern int init_bootmem_done; /* set on !NUMA once bootmem is available */ | ||
193 | extern unsigned long memory_limit; | 211 | extern unsigned long memory_limit; |
194 | extern unsigned long klimit; | 212 | extern unsigned long klimit; |
195 | 213 | ||
@@ -518,54 +536,6 @@ extern void reloc_got2(unsigned long); | |||
518 | 536 | ||
519 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) | 537 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) |
520 | 538 | ||
521 | static inline void create_instruction(unsigned long addr, unsigned int instr) | ||
522 | { | ||
523 | unsigned int *p; | ||
524 | p = (unsigned int *)addr; | ||
525 | *p = instr; | ||
526 | asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p)); | ||
527 | } | ||
528 | |||
529 | /* Flags for create_branch: | ||
530 | * "b" == create_branch(addr, target, 0); | ||
531 | * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE); | ||
532 | * "bl" == create_branch(addr, target, BRANCH_SET_LINK); | ||
533 | * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK); | ||
534 | */ | ||
535 | #define BRANCH_SET_LINK 0x1 | ||
536 | #define BRANCH_ABSOLUTE 0x2 | ||
537 | |||
538 | static inline void create_branch(unsigned long addr, | ||
539 | unsigned long target, int flags) | ||
540 | { | ||
541 | unsigned int instruction; | ||
542 | |||
543 | if (! (flags & BRANCH_ABSOLUTE)) | ||
544 | target = target - addr; | ||
545 | |||
546 | /* Mask out the flags and target, so they don't step on each other. */ | ||
547 | instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC); | ||
548 | |||
549 | create_instruction(addr, instruction); | ||
550 | } | ||
551 | |||
552 | static inline void create_function_call(unsigned long addr, void * func) | ||
553 | { | ||
554 | unsigned long func_addr; | ||
555 | |||
556 | #ifdef CONFIG_PPC64 | ||
557 | /* | ||
558 | * On PPC64 the function pointer actually points to the function's | ||
559 | * descriptor. The first entry in the descriptor is the address | ||
560 | * of the function text. | ||
561 | */ | ||
562 | func_addr = *(unsigned long *)func; | ||
563 | #else | ||
564 | func_addr = (unsigned long)func; | ||
565 | #endif | ||
566 | create_branch(addr, func_addr, BRANCH_SET_LINK); | ||
567 | } | ||
568 | |||
569 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 539 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
570 | extern void account_system_vtime(struct task_struct *); | 540 | extern void account_system_vtime(struct task_struct *); |
571 | #endif | 541 | #endif |
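
smp_wmb() now expands to lwsync where the subarchitecture has it and to eieio elsewhere, which is enough to order normal cacheable stores. A hedged kernel-context sketch of the producer/consumer pattern it is meant for; the example_data/example_ready pair is illustrative.

    /* Hedged sketch (kernel context): producer/consumer ordering with
     * the new smp_wmb(). The data/ready pair is illustrative. */
    #include <asm/system.h>

    static int example_data;
    static int example_ready;

    static void producer(int value)
    {
            example_data = value;
            smp_wmb();              /* lwsync (or eieio): data before ready */
            example_ready = 1;
    }

    static int consumer(void)
    {
            if (!example_ready)
                    return -1;
            smp_rmb();              /* pairs with the smp_wmb() above */
            return example_data;
    }
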
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h index d030f5ce39ad..b705c2a7651a 100644 --- a/include/asm-powerpc/thread_info.h +++ b/include/asm-powerpc/thread_info.h | |||
@@ -116,7 +116,6 @@ static inline struct thread_info *current_thread_info(void) | |||
116 | #define TIF_SECCOMP 10 /* secure computing */ | 116 | #define TIF_SECCOMP 10 /* secure computing */ |
117 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ | 117 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ |
118 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 118 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
119 | #define TIF_RESTORE_SIGMASK 13 /* Restore signal mask in do_signal */ | ||
120 | #define TIF_FREEZE 14 /* Freezing for suspend */ | 119 | #define TIF_FREEZE 14 /* Freezing for suspend */ |
121 | #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ | 120 | #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ |
122 | #define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ | 121 | #define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ |
@@ -134,21 +133,33 @@ static inline struct thread_info *current_thread_info(void) | |||
134 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 133 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
135 | #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) | 134 | #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) |
136 | #define _TIF_NOERROR (1<<TIF_NOERROR) | 135 | #define _TIF_NOERROR (1<<TIF_NOERROR) |
137 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | ||
138 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 136 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
139 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) | 137 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) |
140 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | 138 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) |
141 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) | 139 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) |
142 | 140 | ||
143 | #define _TIF_USER_WORK_MASK ( _TIF_SIGPENDING | \ | 141 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED) |
144 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | ||
145 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) | 142 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
146 | 143 | ||
147 | /* Bits in local_flags */ | 144 | /* Bits in local_flags */ |
148 | /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ | 145 | /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ |
149 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ | 146 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ |
147 | #define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ | ||
148 | #define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ | ||
150 | 149 | ||
151 | #define _TLF_NAPPING (1 << TLF_NAPPING) | 150 | #define _TLF_NAPPING (1 << TLF_NAPPING) |
151 | #define _TLF_SLEEPING (1 << TLF_SLEEPING) | ||
152 | #define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) | ||
153 | |||
154 | #ifndef __ASSEMBLY__ | ||
155 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
156 | static inline void set_restore_sigmask(void) | ||
157 | { | ||
158 | struct thread_info *ti = current_thread_info(); | ||
159 | ti->local_flags |= _TLF_RESTORE_SIGMASK; | ||
160 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
161 | } | ||
162 | #endif /* !__ASSEMBLY__ */ | ||
152 | 163 | ||
153 | #endif /* __KERNEL__ */ | 164 | #endif /* __KERNEL__ */ |
154 | 165 | ||
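
set_restore_sigmask() records the request in local_flags, which only the owning task touches, and still sets TIF_SIGPENDING so the signal path runs. A hedged sketch of the usual caller pattern, in the style of a pselect/ppoll-type syscall that temporarily swaps the signal mask; example_wait_with_mask() and its arguments are illustrative, not an existing powerpc caller.

    /* Hedged sketch (kernel context): the usual set_restore_sigmask()
     * caller pattern, as used by pselect/ppoll-style syscalls. */
    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/signal.h>

    static long example_wait_with_mask(sigset_t *newmask, long wait_result)
    {
            sigset_t saved;

            sigprocmask(SIG_SETMASK, newmask, &saved);

            if (wait_result == -ERESTARTNOHAND) {
                    /* A signal is pending: let it be delivered with the
                     * temporary mask and have the signal code restore
                     * the saved mask afterwards. */
                    current->saved_sigmask = saved;
                    set_restore_sigmask();
            } else {
                    sigprocmask(SIG_SETMASK, &saved, NULL);
            }
            return wait_result;
    }
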
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h index ce5de6e0e690..febd581ec9b0 100644 --- a/include/asm-powerpc/time.h +++ b/include/asm-powerpc/time.h | |||
@@ -33,6 +33,7 @@ extern unsigned tb_to_us; | |||
33 | 33 | ||
34 | struct rtc_time; | 34 | struct rtc_time; |
35 | extern void to_tm(int tim, struct rtc_time * tm); | 35 | extern void to_tm(int tim, struct rtc_time * tm); |
36 | extern void GregorianDay(struct rtc_time *tm); | ||
36 | extern time_t last_rtc_update; | 37 | extern time_t last_rtc_update; |
37 | 38 | ||
38 | extern void generic_calibrate_decr(void); | 39 | extern void generic_calibrate_decr(void); |
diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h index 92dedde761d1..c55e14f7ef44 100644 --- a/include/asm-powerpc/timex.h +++ b/include/asm-powerpc/timex.h | |||
@@ -38,6 +38,8 @@ static inline cycles_t get_cycles(void) | |||
38 | " .long 0\n" | 38 | " .long 0\n" |
39 | " .long 97b-98b\n" | 39 | " .long 97b-98b\n" |
40 | " .long 99b-98b\n" | 40 | " .long 99b-98b\n" |
41 | " .long 0\n" | ||
42 | " .long 0\n" | ||
41 | ".previous" | 43 | ".previous" |
42 | : "=r" (ret) : "i" (CPU_FTR_601)); | 44 | : "=r" (ret) : "i" (CPU_FTR_601)); |
43 | return ret; | 45 | return ret; |
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h index 88320a05f0a8..5eb8e599e5cc 100644 --- a/include/asm-powerpc/xmon.h +++ b/include/asm-powerpc/xmon.h | |||
@@ -12,13 +12,22 @@ | |||
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | 14 | ||
15 | #include <linux/irqreturn.h> | ||
16 | |||
15 | #ifdef CONFIG_XMON | 17 | #ifdef CONFIG_XMON |
16 | extern void xmon_setup(void); | 18 | extern void xmon_setup(void); |
17 | extern void xmon_register_spus(struct list_head *list); | 19 | extern void xmon_register_spus(struct list_head *list); |
20 | struct pt_regs; | ||
21 | extern int xmon(struct pt_regs *excp); | ||
22 | extern irqreturn_t xmon_irq(int, void *); | ||
18 | #else | 23 | #else |
19 | static inline void xmon_setup(void) { }; | 24 | static inline void xmon_setup(void) { }; |
20 | static inline void xmon_register_spus(struct list_head *list) { }; | 25 | static inline void xmon_register_spus(struct list_head *list) { }; |
21 | #endif | 26 | #endif |
22 | 27 | ||
28 | #if defined(CONFIG_XMON) && defined(CONFIG_SMP) | ||
29 | extern int cpus_are_in_xmon(void); | ||
30 | #endif | ||
31 | |||
23 | #endif /* __KERNEL__ */ | 32 | #endif /* __KERNEL__ */ |
24 | #endif /* __ASM_POWERPC_XMON_H */ | 33 | #endif /* __ASM_POWERPC_XMON_H */ |
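
cpus_are_in_xmon() is only declared when both CONFIG_XMON and CONFIG_SMP are set, so callers have to mirror that guard. A hedged kernel-context sketch of the pattern; debugger_holds_cpus() is an illustrative wrapper.

    /* Hedged sketch (kernel context): mirror the CONFIG_XMON &&
     * CONFIG_SMP guard used for the declaration above. */
    #include <asm/xmon.h>

    static int debugger_holds_cpus(void)
    {
    #if defined(CONFIG_XMON) && defined(CONFIG_SMP)
            return cpus_are_in_xmon();
    #else
            return 0;
    #endif
    }
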