author		Linus Torvalds <torvalds@g5.osdl.org>	2006-01-14 00:24:55 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-14 00:24:55 -0500
commit		87530db5ec7d519c7ba334e414307c5130ae2da8 (patch)
tree		a1720bb29998d8d658e91ed106263561a8101bf0 /include
parent		a148058c5cf72ad23fb6c4cf9828f7276e13151c (diff)
parent		d5f079000b20887bfbc84d772a27709c5665e652 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include')
-rw-r--r--	include/asm-powerpc/atomic.h	46
-rw-r--r--	include/asm-powerpc/bitops.h	6
-rw-r--r--	include/asm-powerpc/cputable.h	14
-rw-r--r--	include/asm-powerpc/elf.h	16
-rw-r--r--	include/asm-powerpc/futex.h	2
-rw-r--r--	include/asm-powerpc/hvcall.h	5
-rw-r--r--	include/asm-powerpc/lppaca.h	6
-rw-r--r--	include/asm-powerpc/paca.h	14
-rw-r--r--	include/asm-powerpc/ppc_asm.h	76
-rw-r--r--	include/asm-powerpc/prom.h	8
-rw-r--r--	include/asm-powerpc/spinlock.h	21
-rw-r--r--	include/asm-powerpc/synch.h	23
-rw-r--r--	include/asm-powerpc/system.h	8
-rw-r--r--	include/asm-powerpc/time.h	5
-rw-r--r--	include/linux/proc_fs.h	5
15 files changed, 128 insertions, 127 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 248f9aec959c..147a38dcc766 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
@@ -176,19 +176,19 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
 #define atomic_add_unless(v, a, u)			\
 ({							\
 	int c, old;					\
 	c = atomic_read(v);				\
 	for (;;) {					\
 		if (unlikely(c == (u)))			\
 			break;				\
 		old = atomic_cmpxchg((v), c, c + (a));	\
 		if (likely(old == c))			\
 			break;				\
 		c = old;				\
 	}						\
 	c != (u);					\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n"
@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\
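Note: every atomic.h hunk above makes the same substitution; the eieio that used to precede the lwarx/stwcx. reservation loop becomes an lwsync, while the isync after the loop is unchanged. A minimal sketch of the resulting barrier placement, modeled on atomic_add_return() and assuming the kernel definitions this header already uses (atomic_t, PPC405_ERR77, LWSYNC_ON_SMP, ISYNC_ON_SMP); it is illustrative, not a replacement for the header:

static __inline__ int atomic_add_return_sketch(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP			/* release: order earlier stores before the update */
"1:	lwarx	%0,0,%2\n"		/* load-reserve v->counter */
"	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n"		/* store-conditional; retry if the reservation was lost */
"	bne-	1b\n"
	ISYNC_ON_SMP			/* acquire: later loads see the completed update */
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}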
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 1996eaa8aeae..bf6941a810b8 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr,
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:"	PPC_LLARX "%0,0,%3		# test_and_set_bit\n"
 	"or	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:"	PPC_LLARX "%0,0,%3		# test_and_clear_bit\n"
 	"andc	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr,
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:"	PPC_LLARX "%0,0,%3		# test_and_change_bit\n"
 	"xor	%1,%0,%2 \n"
 	PPC405_ERR77(0,%3)
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index ef6ead34a773..64210549f56b 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -19,6 +19,7 @@
 #define PPC_FEATURE_POWER5		0x00040000
 #define PPC_FEATURE_POWER5_PLUS		0x00020000
 #define PPC_FEATURE_CELL		0x00010000
+#define PPC_FEATURE_BOOKE		0x00008000
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
@@ -31,11 +32,11 @@ struct cpu_spec;
 typedef	void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
 
 enum powerpc_oprofile_type {
-	INVALID = 0,
-	RS64 = 1,
-	POWER4 = 2,
-	G4 = 3,
-	BOOKE = 4,
+	PPC_OPROFILE_INVALID = 0,
+	PPC_OPROFILE_RS64 = 1,
+	PPC_OPROFILE_POWER4 = 2,
+	PPC_OPROFILE_G4 = 3,
+	PPC_OPROFILE_BOOKE = 4,
 };
 
 struct cpu_spec {
@@ -64,6 +65,9 @@ struct cpu_spec {
 
 	/* Processor specific oprofile operations */
 	enum powerpc_oprofile_type oprofile_type;
+
+	/* Name of processor class, for the ELF AT_PLATFORM entry */
+	char		*platform;
 };
 
 extern struct cpu_spec *cur_cpu_spec;
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 45f2af6f89c4..94d228f9c6ac 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -221,20 +221,18 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
    instruction set this cpu supports. This could be done in userspace,
    but it's not easy, and we've already done it here. */
 # define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
-#ifdef __powerpc64__
-# define ELF_PLAT_INIT(_r, load_addr)	do { \
-	_r->gpr[2] = load_addr; \
-} while (0)
-#endif /* __powerpc64__ */
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization. This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
+   intent than poking at uname or /proc/cpuinfo. */
 
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
+#define ELF_PLATFORM	(cur_cpu_spec->platform)
 
-#define ELF_PLATFORM	(NULL)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr)	do { \
+	_r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
 
 #ifdef __KERNEL__
 
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f0319d50b129..39e85f320a76 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -11,7 +11,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile (					\
-	SYNC_ON_SMP					\
+	LWSYNC_ON_SMP					\
 "1:	lwarx	%0,0,%2\n"				\
 	insn						\
 	PPC405_ERR77(0, %2)				\
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index da7af5a720e0..38ca9ad6110d 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -6,7 +6,10 @@
 
 #define H_Success	0
 #define H_Busy		1	/* Hardware busy -- retry later */
+#define H_Closed	2	/* Resource closed */
 #define H_Constrained	4	/* Resource request constrained to max allowed */
+#define H_InProgress	14	/* Kind of like busy */
+#define H_Continue	18	/* Returned from H_Join on success */
 #define H_LongBusyStartRange	9900	/* Start of long busy range */
 #define H_LongBusyOrder1msec	9900	/* Long busy, hint that 1msec is a good time to retry */
 #define H_LongBusyOrder10msec	9901	/* Long busy, hint that 10msec is a good time to retry */
@@ -114,6 +117,8 @@
 #define H_REGISTER_VTERM	0x154
 #define H_FREE_VTERM		0x158
 #define H_POLL_PENDING		0x1D8
+#define H_JOIN			0x298
+#define H_ENABLE_CRQ		0x2B0
 
 #ifndef __ASSEMBLY__
 
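Note: the new return codes and opcodes fit the usual pattern of issuing an hcall and switching on the result. A hedged caller-side sketch of the codes added above; plpar_hcall_norets() is the customary pSeries hcall wrapper and is assumed here (it is not defined in this diff), and the error codes are illustrative:

/* Hypothetical sketch: issue H_JOIN and interpret the new return codes. */
static int try_join_sketch(void)
{
	long rc = plpar_hcall_norets(H_JOIN);	/* assumed wrapper, not part of this diff */

	if (rc == H_Continue)			/* documented success return for H_Join */
		return 0;
	if (rc == H_Busy || rc == H_InProgress)	/* both mean "try again later" */
		return -EBUSY;
	return -EIO;
}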
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index ff82ea7c4829..cd9f11f1ef14 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -29,7 +29,9 @@
 //----------------------------------------------------------------------------
 #include <asm/types.h>
 
-struct lppaca {
+/* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
+ * alignment is sufficient to prevent this */
+struct __attribute__((__aligned__(0x400))) lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,5 +131,7 @@ struct lppaca {
 	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
 };
 
+extern struct lppaca lppaca[];
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index a64b4d425dab..c9add8f1ad94 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -23,6 +23,7 @@
 
 register struct paca_struct *local_paca asm("r13");
 #define get_paca()	local_paca
+#define get_lppaca()	(get_paca()->lppaca_ptr)
 
 struct task_struct;
 
@@ -95,19 +96,6 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
-
-	/*
-	 * iSeries structure which the hypervisor knows about -
-	 * this structure should not cross a page boundary.
-	 * The vpa_init/register_vpa call is now known to fail if the
-	 * lppaca structure crosses a page boundary.
-	 * The lppaca is also used on POWER5 pSeries boxes.
-	 * The lppaca is 640 bytes long, and cannot readily change
-	 * since the hypervisor knows its layout, so a 1kB
-	 * alignment will suffice to ensure that it doesn't
-	 * cross a page boundary.
-	 */
-	struct lppaca lppaca __attribute__((__aligned__(0x400)));
 };
 
 extern struct paca_struct paca[];
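Note: with the lppaca moved out of struct paca_struct (the alignment requirement now lives on struct lppaca itself, see lppaca.h above), code reaches it through the new get_lppaca() accessor instead of get_paca()->lppaca; the spinlock.h and time.h hunks further down show this conversion. A small sketch of the new idiom, with a hypothetical helper name for illustration only:

/* Old form:  get_paca()->lppaca.shared_proc
 * New form:  get_lppaca()->shared_proc   (via the lppaca_ptr added above) */
static inline int cpu_in_shared_processor_mode(void)	/* hypothetical helper */
{
	return get_lppaca()->shared_proc;
}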
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 0dc798d46ea4..ab8688d39024 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -156,52 +156,56 @@ n:
 #endif
 
 /*
- * LOADADDR( rn, name )
- *   loads the address of 'name' into 'rn'
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ *   Loads the value of the constant expression 'expr' into register 'rn'
+ *   using immediate instructions only.  Use this when it's important not
+ *   to reference other data (i.e. on ppc64 when the TOC pointer is not
+ *   valid).
  *
- * LOADBASE( rn, name )
- *   loads the address (possibly without the low 16 bits) of 'name' into 'rn'
- *   suitable for base+disp addressing
+ * LOAD_REG_ADDR(rn, name)
+ *   Loads the address of label 'name' into register 'rn'.  Use this when
+ *   you don't particularly need immediate instructions only, but you need
+ *   the whole address in one register (e.g. it's a structure address and
+ *   you want to access various offsets within it).  On ppc32 this is
+ *   identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
+ *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
+ *   in size, so is suitable for use directly as an offset in load and store
+ *   instructions.  Use this when loading/storing a single word or less as:
+ *      LOAD_REG_ADDRBASE(rX, name)
+ *      ld	rY,ADDROFF(name)(rX)
 */
 #ifdef __powerpc64__
-#define LOADADDR(rn,name) \
-	lis	rn,name##@highest;	\
-	ori	rn,rn,name##@higher;	\
-	rldicr	rn,rn,32,31;		\
-	oris	rn,rn,name##@h;		\
-	ori	rn,rn,name##@l
+#define LOAD_REG_IMMEDIATE(reg,expr)		\
+	lis     (reg),(expr)@highest;		\
+	ori     (reg),(reg),(expr)@higher;	\
+	rldicr  (reg),(reg),32,31;		\
+	oris    (reg),(reg),(expr)@h;		\
+	ori     (reg),(reg),(expr)@l;
 
-#define LOADBASE(rn,name)	\
-	ld	rn,name@got(r2)
+#define LOAD_REG_ADDR(reg,name)			\
+	ld	(reg),name@got(r2)
 
-#define OFF(name)	0
-
-#define SET_REG_TO_CONST(reg, value)		\
-	lis     reg,(((value)>>48)&0xFFFF);	\
-	ori     reg,reg,(((value)>>32)&0xFFFF);	\
-	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(((value)>>16)&0xFFFF);	\
-	ori     reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label)		\
-	lis     reg,(label)@highest;		\
-	ori     reg,reg,(label)@higher;		\
-	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(label)@h;		\
-	ori     reg,reg,(label)@l;
+#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name)			0
 
 /* offsets for stack frame layout */
 #define LRSAVE	16
 
 #else /* 32-bit */
-#define LOADADDR(rn,name) \
-	lis	rn,name@ha;	\
-	addi	rn,rn,name@l
 
-#define LOADBASE(rn,name)	\
-	lis	rn,name@ha
+#define LOAD_REG_IMMEDIATE(reg,expr)		\
+	lis	(reg),(expr)@ha;		\
+	addi	(reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)
 
-#define OFF(name)	name@l
+#define LOAD_REG_ADDRBASE(reg, name)	lis	(reg),name@ha
+#define ADDROFF(name)			name@l
 
 /* offsets for stack frame layout */
 #define LRSAVE	4
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 329e9bf62260..5b2bd4eefb01 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -87,6 +87,7 @@ struct device_node {
 	char	*full_name;
 
 	struct	property *properties;
+	struct	property *deadprops;	/* removed properties */
 	struct	device_node *parent;
 	struct	device_node *child;
 	struct	device_node *sibling;
@@ -135,6 +136,9 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
 					     struct device_node *prev);
+extern struct property *of_find_property(struct device_node *np,
+					 const char *name,
+					 int *lenp);
 extern struct device_node *of_node_get(struct device_node *node);
 extern void of_node_put(struct device_node *node);
 
@@ -164,6 +168,10 @@ extern int prom_n_size_cells(struct device_node* np);
 extern int prom_n_intr_cells(struct device_node* np);
 extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
 extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+				struct property *newprop,
+				struct property *oldprop);
 
 #ifdef CONFIG_PPC32
 /*
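Note: prom.h gains a small dynamic device-tree property API: of_find_property() looks a property up by name, and prom_remove_property()/prom_update_property() take one out or replace it (the new deadprops field suggests removed properties are kept on the node rather than freed). A hedged usage sketch built only on the declarations above; of_find_node_by_path() and the error codes are assumptions, not part of this diff:

/* Hypothetical caller: replace the "bootargs" property on /chosen. */
static int replace_bootargs_sketch(struct property *newprop)
{
	struct device_node *chosen;
	struct property *old;
	int len, rc;

	chosen = of_find_node_by_path("/chosen");	/* assumed helper, not in this diff */
	if (chosen == NULL)
		return -ENODEV;

	old = of_find_property(chosen, "bootargs", &len);
	if (old == NULL)
		rc = prom_add_property(chosen, newprop);	/* nothing to replace */
	else
		/* the displaced property presumably ends up on the node's
		 * new deadprops list rather than being freed */
		rc = prom_update_property(chosen, newprop, old);

	of_node_put(chosen);
	return rc;
}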
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 754900901cd8..895cb6d3a42a 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
 	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2		# __spin_trylock\n\
+"1:	lwarx		%0,0,%2\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
@@ -80,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(raw_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
@@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(SYNC_ON_SMP"	# __raw_spin_unlock"
-				: : :"memory");
+	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
 }
 
@@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
 	long tmp;
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1		# read_trylock\n"
+"1:	lwarx		%0,0,%1\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw)
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2		# write_trylock\n\
+"1:	lwarx		%0,0,%2\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 	PPC405_ERR77(0,%1)
@@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
 	long tmp;
 
 	__asm__ __volatile__(
-	"eieio			# read_unlock\n\
-1:	lwarx		%0,0,%1\n\
+	"# read_unlock\n\t"
+	LWSYNC_ON_SMP
+"1:	lwarx		%0,0,%1\n\
 	addic		%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.		%0,0,%1\n\
@@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
 
 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	__asm__ __volatile__(SYNC_ON_SMP"	# write_unlock"
-			: : :"memory");
+	__asm__ __volatile__("# write_unlock\n\t"
+			LWSYNC_ON_SMP: : :"memory");
 	rw->lock = 0;
 }
 
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 794870ab8fd3..c90d9d9aae72 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_SYNCH_H
 #ifdef __KERNEL__
 
+#include <linux/stringify.h>
+
 #ifdef __powerpc64__
 #define __SUBARCH_HAS_LWSYNC
 #endif
@@ -12,20 +14,12 @@
 #    define LWSYNC	sync
 #endif
 
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
 #ifdef CONFIG_SMP
-#define EIEIO_ON_SMP	"eieio\n"
 #define ISYNC_ON_SMP	"\n\tisync"
-#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
 #else
-#define EIEIO_ON_SMP
 #define ISYNC_ON_SMP
-#define SYNC_ON_SMP
+#define LWSYNC_ON_SMP
 #endif
 
 static inline void eieio(void)
@@ -38,14 +32,5 @@ static inline void isync(void)
 	__asm__ __volatile__ ("isync" : : : "memory");
 }
 
-#ifdef CONFIG_SMP
-#define eieio_on_smp()	eieio()
-#define isync_on_smp()	isync()
-#else
-#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
-#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */
-
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 9b822afa7d0e..d9bf53653b10 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -212,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
@@ -232,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
@@ -287,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
@@ -311,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index d9b86a17271b..baddc9ab57ad 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
 	set_dec_cpu6(val);
 #else
 #ifdef CONFIG_PPC_ISERIES
-	struct paca_struct *lpaca = get_paca();
 	int cur_dec;
 
-	if (lpaca->lppaca.shared_proc) {
-		lpaca->lppaca.virtual_decr = val;
+	if (get_lppaca()->shared_proc) {
+		get_lppaca()->virtual_decr = val;
 		cur_dec = get_dec();
 		if (cur_dec > val)
 			HvCall_setVirtualDecr();
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 74488e49166d..aa6322d45198 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -146,6 +146,11 @@ struct property;
 extern void proc_device_tree_init(void);
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
+extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+					 struct property *prop);
+extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+					 struct property *newprop,
+					 struct property *oldprop);
 #endif /* CONFIG_PROC_DEVICETREE */
 
 extern struct proc_dir_entry *proc_symlink(const char *,