Diffstat (limited to 'arch/powerpc')
52 files changed, 1290 insertions, 302 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bf15e7b4cd3d..654bba5cf6b4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -240,6 +240,33 @@ config PPC_OF_PLATFORM_PCI | |||
240 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC | 240 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC |
241 | def_bool y | 241 | def_bool y |
242 | 242 | ||
243 | config PPC_ADV_DEBUG_REGS | ||
244 | bool | ||
245 | depends on 40x || BOOKE | ||
246 | default y | ||
247 | |||
248 | config PPC_ADV_DEBUG_IACS | ||
249 | int | ||
250 | depends on PPC_ADV_DEBUG_REGS | ||
251 | default 4 if 44x | ||
252 | default 2 | ||
253 | |||
254 | config PPC_ADV_DEBUG_DACS | ||
255 | int | ||
256 | depends on PPC_ADV_DEBUG_REGS | ||
257 | default 2 | ||
258 | |||
259 | config PPC_ADV_DEBUG_DVCS | ||
260 | int | ||
261 | depends on PPC_ADV_DEBUG_REGS | ||
262 | default 2 if 44x | ||
263 | default 0 | ||
264 | |||
265 | config PPC_ADV_DEBUG_DAC_RANGE | ||
266 | bool | ||
267 | depends on PPC_ADV_DEBUG_REGS && 44x | ||
268 | default y | ||
269 | |||
243 | source "init/Kconfig" | 270 | source "init/Kconfig" |
244 | 271 | ||
245 | source "kernel/Kconfig.freezer" | 272 | source "kernel/Kconfig.freezer" |
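
Note: the new symbols are hidden, derived Kconfig options; nothing selects them by prompt. They exist so C code can size and guard the advanced-debug state at compile time. A minimal sketch of that usage, with a hypothetical structure name (the real consumer is thread_struct, further down in this diff):

    #ifdef CONFIG_PPC_ADV_DEBUG_REGS
    struct adv_debug_state {                            /* hypothetical, for illustration */
            unsigned long iac[CONFIG_PPC_ADV_DEBUG_IACS];   /* instruction address compares */
            unsigned long dac[CONFIG_PPC_ADV_DEBUG_DACS];   /* data address compares */
    #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
            unsigned long dvc[CONFIG_PPC_ADV_DEBUG_DVCS];   /* data value compares */
    #endif
    };
    #endif
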
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 8f0fe7971949..c1b475a941eb 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_POWERPC_ASM_COMPAT_H | 2 | #define _ASM_POWERPC_ASM_COMPAT_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/ppc-opcode.h> | ||
5 | 6 | ||
6 | #ifdef __ASSEMBLY__ | 7 | #ifdef __ASSEMBLY__ |
7 | # define stringify_in_c(...) __VA_ARGS__ | 8 | # define stringify_in_c(...) __VA_ARGS__ |
@@ -24,7 +25,7 @@ | |||
24 | #define PPC_LONG stringify_in_c(.llong) | 25 | #define PPC_LONG stringify_in_c(.llong) |
25 | #define PPC_LONG_ALIGN stringify_in_c(.balign 8) | 26 | #define PPC_LONG_ALIGN stringify_in_c(.balign 8) |
26 | #define PPC_TLNEI stringify_in_c(tdnei) | 27 | #define PPC_TLNEI stringify_in_c(tdnei) |
27 | #define PPC_LLARX stringify_in_c(ldarx) | 28 | #define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) |
28 | #define PPC_STLCX stringify_in_c(stdcx.) | 29 | #define PPC_STLCX stringify_in_c(stdcx.) |
29 | #define PPC_CNTLZL stringify_in_c(cntlzd) | 30 | #define PPC_CNTLZL stringify_in_c(cntlzd) |
30 | 31 | ||
@@ -46,7 +47,7 @@ | |||
46 | #define PPC_LONG stringify_in_c(.long) | 47 | #define PPC_LONG stringify_in_c(.long) |
47 | #define PPC_LONG_ALIGN stringify_in_c(.balign 4) | 48 | #define PPC_LONG_ALIGN stringify_in_c(.balign 4) |
48 | #define PPC_TLNEI stringify_in_c(twnei) | 49 | #define PPC_TLNEI stringify_in_c(twnei) |
49 | #define PPC_LLARX stringify_in_c(lwarx) | 50 | #define PPC_LLARX(t, a, b, eh) PPC_LWARX(t, a, b, eh) |
50 | #define PPC_STLCX stringify_in_c(stwcx.) | 51 | #define PPC_STLCX stringify_in_c(stwcx.) |
51 | #define PPC_CNTLZL stringify_in_c(cntlzw) | 52 | #define PPC_CNTLZL stringify_in_c(cntlzw) |
52 | #define PPC_MTOCRF stringify_in_c(mtcrf) | 53 | #define PPC_MTOCRF stringify_in_c(mtcrf) |
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 4012483b1899..b8f152ece025 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v) | |||
49 | int t; | 49 | int t; |
50 | 50 | ||
51 | __asm__ __volatile__( | 51 | __asm__ __volatile__( |
52 | LWSYNC_ON_SMP | 52 | PPC_RELEASE_BARRIER |
53 | "1: lwarx %0,0,%2 # atomic_add_return\n\ | 53 | "1: lwarx %0,0,%2 # atomic_add_return\n\ |
54 | add %0,%1,%0\n" | 54 | add %0,%1,%0\n" |
55 | PPC405_ERR77(0,%2) | 55 | PPC405_ERR77(0,%2) |
56 | " stwcx. %0,0,%2 \n\ | 56 | " stwcx. %0,0,%2 \n\ |
57 | bne- 1b" | 57 | bne- 1b" |
58 | ISYNC_ON_SMP | 58 | PPC_ACQUIRE_BARRIER |
59 | : "=&r" (t) | 59 | : "=&r" (t) |
60 | : "r" (a), "r" (&v->counter) | 60 | : "r" (a), "r" (&v->counter) |
61 | : "cc", "memory"); | 61 | : "cc", "memory"); |
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v) | |||
85 | int t; | 85 | int t; |
86 | 86 | ||
87 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
88 | LWSYNC_ON_SMP | 88 | PPC_RELEASE_BARRIER |
89 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ | 89 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ |
90 | subf %0,%1,%0\n" | 90 | subf %0,%1,%0\n" |
91 | PPC405_ERR77(0,%2) | 91 | PPC405_ERR77(0,%2) |
92 | " stwcx. %0,0,%2 \n\ | 92 | " stwcx. %0,0,%2 \n\ |
93 | bne- 1b" | 93 | bne- 1b" |
94 | ISYNC_ON_SMP | 94 | PPC_ACQUIRE_BARRIER |
95 | : "=&r" (t) | 95 | : "=&r" (t) |
96 | : "r" (a), "r" (&v->counter) | 96 | : "r" (a), "r" (&v->counter) |
97 | : "cc", "memory"); | 97 | : "cc", "memory"); |
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v) | |||
119 | int t; | 119 | int t; |
120 | 120 | ||
121 | __asm__ __volatile__( | 121 | __asm__ __volatile__( |
122 | LWSYNC_ON_SMP | 122 | PPC_RELEASE_BARRIER |
123 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ | 123 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ |
124 | addic %0,%0,1\n" | 124 | addic %0,%0,1\n" |
125 | PPC405_ERR77(0,%1) | 125 | PPC405_ERR77(0,%1) |
126 | " stwcx. %0,0,%1 \n\ | 126 | " stwcx. %0,0,%1 \n\ |
127 | bne- 1b" | 127 | bne- 1b" |
128 | ISYNC_ON_SMP | 128 | PPC_ACQUIRE_BARRIER |
129 | : "=&r" (t) | 129 | : "=&r" (t) |
130 | : "r" (&v->counter) | 130 | : "r" (&v->counter) |
131 | : "cc", "xer", "memory"); | 131 | : "cc", "xer", "memory"); |
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
163 | int t; | 163 | int t; |
164 | 164 | ||
165 | __asm__ __volatile__( | 165 | __asm__ __volatile__( |
166 | LWSYNC_ON_SMP | 166 | PPC_RELEASE_BARRIER |
167 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ | 167 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ |
168 | addic %0,%0,-1\n" | 168 | addic %0,%0,-1\n" |
169 | PPC405_ERR77(0,%1) | 169 | PPC405_ERR77(0,%1) |
170 | " stwcx. %0,0,%1\n\ | 170 | " stwcx. %0,0,%1\n\ |
171 | bne- 1b" | 171 | bne- 1b" |
172 | ISYNC_ON_SMP | 172 | PPC_ACQUIRE_BARRIER |
173 | : "=&r" (t) | 173 | : "=&r" (t) |
174 | : "r" (&v->counter) | 174 | : "r" (&v->counter) |
175 | : "cc", "xer", "memory"); | 175 | : "cc", "xer", "memory"); |
@@ -194,7 +194,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
194 | int t; | 194 | int t; |
195 | 195 | ||
196 | __asm__ __volatile__ ( | 196 | __asm__ __volatile__ ( |
197 | LWSYNC_ON_SMP | 197 | PPC_RELEASE_BARRIER |
198 | "1: lwarx %0,0,%1 # atomic_add_unless\n\ | 198 | "1: lwarx %0,0,%1 # atomic_add_unless\n\ |
199 | cmpw 0,%0,%3 \n\ | 199 | cmpw 0,%0,%3 \n\ |
200 | beq- 2f \n\ | 200 | beq- 2f \n\ |
@@ -202,7 +202,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
202 | PPC405_ERR77(0,%2) | 202 | PPC405_ERR77(0,%2) |
203 | " stwcx. %0,0,%1 \n\ | 203 | " stwcx. %0,0,%1 \n\ |
204 | bne- 1b \n" | 204 | bne- 1b \n" |
205 | ISYNC_ON_SMP | 205 | PPC_ACQUIRE_BARRIER |
206 | " subf %0,%2,%0 \n\ | 206 | " subf %0,%2,%0 \n\ |
207 | 2:" | 207 | 2:" |
208 | : "=&r" (t) | 208 | : "=&r" (t) |
@@ -227,7 +227,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) | |||
227 | int t; | 227 | int t; |
228 | 228 | ||
229 | __asm__ __volatile__( | 229 | __asm__ __volatile__( |
230 | LWSYNC_ON_SMP | 230 | PPC_RELEASE_BARRIER |
231 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ | 231 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ |
232 | cmpwi %0,1\n\ | 232 | cmpwi %0,1\n\ |
233 | addi %0,%0,-1\n\ | 233 | addi %0,%0,-1\n\ |
@@ -235,7 +235,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) | |||
235 | PPC405_ERR77(0,%1) | 235 | PPC405_ERR77(0,%1) |
236 | " stwcx. %0,0,%1\n\ | 236 | " stwcx. %0,0,%1\n\ |
237 | bne- 1b" | 237 | bne- 1b" |
238 | ISYNC_ON_SMP | 238 | PPC_ACQUIRE_BARRIER |
239 | "\n\ | 239 | "\n\ |
240 | 2:" : "=&b" (t) | 240 | 2:" : "=&b" (t) |
241 | : "r" (&v->counter) | 241 | : "r" (&v->counter) |
@@ -286,12 +286,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v) | |||
286 | long t; | 286 | long t; |
287 | 287 | ||
288 | __asm__ __volatile__( | 288 | __asm__ __volatile__( |
289 | LWSYNC_ON_SMP | 289 | PPC_RELEASE_BARRIER |
290 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ | 290 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ |
291 | add %0,%1,%0\n\ | 291 | add %0,%1,%0\n\ |
292 | stdcx. %0,0,%2 \n\ | 292 | stdcx. %0,0,%2 \n\ |
293 | bne- 1b" | 293 | bne- 1b" |
294 | ISYNC_ON_SMP | 294 | PPC_ACQUIRE_BARRIER |
295 | : "=&r" (t) | 295 | : "=&r" (t) |
296 | : "r" (a), "r" (&v->counter) | 296 | : "r" (a), "r" (&v->counter) |
297 | : "cc", "memory"); | 297 | : "cc", "memory"); |
@@ -320,12 +320,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v) | |||
320 | long t; | 320 | long t; |
321 | 321 | ||
322 | __asm__ __volatile__( | 322 | __asm__ __volatile__( |
323 | LWSYNC_ON_SMP | 323 | PPC_RELEASE_BARRIER |
324 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ | 324 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ |
325 | subf %0,%1,%0\n\ | 325 | subf %0,%1,%0\n\ |
326 | stdcx. %0,0,%2 \n\ | 326 | stdcx. %0,0,%2 \n\ |
327 | bne- 1b" | 327 | bne- 1b" |
328 | ISYNC_ON_SMP | 328 | PPC_ACQUIRE_BARRIER |
329 | : "=&r" (t) | 329 | : "=&r" (t) |
330 | : "r" (a), "r" (&v->counter) | 330 | : "r" (a), "r" (&v->counter) |
331 | : "cc", "memory"); | 331 | : "cc", "memory"); |
@@ -352,12 +352,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) | |||
352 | long t; | 352 | long t; |
353 | 353 | ||
354 | __asm__ __volatile__( | 354 | __asm__ __volatile__( |
355 | LWSYNC_ON_SMP | 355 | PPC_RELEASE_BARRIER |
356 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ | 356 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ |
357 | addic %0,%0,1\n\ | 357 | addic %0,%0,1\n\ |
358 | stdcx. %0,0,%1 \n\ | 358 | stdcx. %0,0,%1 \n\ |
359 | bne- 1b" | 359 | bne- 1b" |
360 | ISYNC_ON_SMP | 360 | PPC_ACQUIRE_BARRIER |
361 | : "=&r" (t) | 361 | : "=&r" (t) |
362 | : "r" (&v->counter) | 362 | : "r" (&v->counter) |
363 | : "cc", "xer", "memory"); | 363 | : "cc", "xer", "memory"); |
@@ -394,12 +394,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) | |||
394 | long t; | 394 | long t; |
395 | 395 | ||
396 | __asm__ __volatile__( | 396 | __asm__ __volatile__( |
397 | LWSYNC_ON_SMP | 397 | PPC_RELEASE_BARRIER |
398 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ | 398 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ |
399 | addic %0,%0,-1\n\ | 399 | addic %0,%0,-1\n\ |
400 | stdcx. %0,0,%1\n\ | 400 | stdcx. %0,0,%1\n\ |
401 | bne- 1b" | 401 | bne- 1b" |
402 | ISYNC_ON_SMP | 402 | PPC_ACQUIRE_BARRIER |
403 | : "=&r" (t) | 403 | : "=&r" (t) |
404 | : "r" (&v->counter) | 404 | : "r" (&v->counter) |
405 | : "cc", "xer", "memory"); | 405 | : "cc", "xer", "memory"); |
@@ -419,13 +419,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) | |||
419 | long t; | 419 | long t; |
420 | 420 | ||
421 | __asm__ __volatile__( | 421 | __asm__ __volatile__( |
422 | LWSYNC_ON_SMP | 422 | PPC_RELEASE_BARRIER |
423 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ | 423 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ |
424 | addic. %0,%0,-1\n\ | 424 | addic. %0,%0,-1\n\ |
425 | blt- 2f\n\ | 425 | blt- 2f\n\ |
426 | stdcx. %0,0,%1\n\ | 426 | stdcx. %0,0,%1\n\ |
427 | bne- 1b" | 427 | bne- 1b" |
428 | ISYNC_ON_SMP | 428 | PPC_ACQUIRE_BARRIER |
429 | "\n\ | 429 | "\n\ |
430 | 2:" : "=&r" (t) | 430 | 2:" : "=&r" (t) |
431 | : "r" (&v->counter) | 431 | : "r" (&v->counter) |
@@ -451,14 +451,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
451 | long t; | 451 | long t; |
452 | 452 | ||
453 | __asm__ __volatile__ ( | 453 | __asm__ __volatile__ ( |
454 | LWSYNC_ON_SMP | 454 | PPC_RELEASE_BARRIER |
455 | "1: ldarx %0,0,%1 # atomic_add_unless\n\ | 455 | "1: ldarx %0,0,%1 # atomic_add_unless\n\ |
456 | cmpd 0,%0,%3 \n\ | 456 | cmpd 0,%0,%3 \n\ |
457 | beq- 2f \n\ | 457 | beq- 2f \n\ |
458 | add %0,%2,%0 \n" | 458 | add %0,%2,%0 \n" |
459 | " stdcx. %0,0,%1 \n\ | 459 | " stdcx. %0,0,%1 \n\ |
460 | bne- 1b \n" | 460 | bne- 1b \n" |
461 | ISYNC_ON_SMP | 461 | PPC_ACQUIRE_BARRIER |
462 | " subf %0,%2,%0 \n\ | 462 | " subf %0,%2,%0 \n\ |
463 | 2:" | 463 | 2:" |
464 | : "=&r" (t) | 464 | : "=&r" (t) |
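
Note: the changes in this file are a mechanical rename: LWSYNC_ON_SMP becomes PPC_RELEASE_BARRIER (before the lwarx/ldarx) and ISYNC_ON_SMP becomes PPC_ACQUIRE_BARRIER (after the successful stwcx./stdcx.); the new names are defined in synch.h later in this diff. The pairing is what makes the value-returning atomics behave as full memory barriers. A hedged sketch of that contract (hypothetical caller, not from the patch):

    #include <asm/atomic.h>

    /* atomic_add_return() acts as a full barrier: the store to *payload
     * is ordered before the increment by PPC_RELEASE_BARRIER, and the
     * read of *payload cannot be hoisted above it thanks to
     * PPC_ACQUIRE_BARRIER.
     */
    static int publish_and_count(atomic_t *count, int *payload)
    {
            *payload = 42;
            if (atomic_add_return(1, count) == 1)
                    return *payload;
            return 0;
    }
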
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 56f2f2ea5631..30964ae2d096 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -65,7 +65,7 @@ static __inline__ void fn(unsigned long mask, \ | |||
65 | unsigned long *p = (unsigned long *)_p; \ | 65 | unsigned long *p = (unsigned long *)_p; \ |
66 | __asm__ __volatile__ ( \ | 66 | __asm__ __volatile__ ( \ |
67 | prefix \ | 67 | prefix \ |
68 | "1:" PPC_LLARX "%0,0,%3\n" \ | 68 | "1:" PPC_LLARX(%0,0,%3,0) "\n" \ |
69 | stringify_in_c(op) "%0,%0,%2\n" \ | 69 | stringify_in_c(op) "%0,%0,%2\n" \ |
70 | PPC405_ERR77(0,%3) \ | 70 | PPC405_ERR77(0,%3) \ |
71 | PPC_STLCX "%0,0,%3\n" \ | 71 | PPC_STLCX "%0,0,%3\n" \ |
@@ -78,7 +78,7 @@ static __inline__ void fn(unsigned long mask, \ | |||
78 | 78 | ||
79 | DEFINE_BITOP(set_bits, or, "", "") | 79 | DEFINE_BITOP(set_bits, or, "", "") |
80 | DEFINE_BITOP(clear_bits, andc, "", "") | 80 | DEFINE_BITOP(clear_bits, andc, "", "") |
81 | DEFINE_BITOP(clear_bits_unlock, andc, LWSYNC_ON_SMP, "") | 81 | DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "") |
82 | DEFINE_BITOP(change_bits, xor, "", "") | 82 | DEFINE_BITOP(change_bits, xor, "", "") |
83 | 83 | ||
84 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | 84 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) |
@@ -103,31 +103,35 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr) | |||
103 | 103 | ||
104 | /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output | 104 | /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output |
105 | * operands. */ | 105 | * operands. */ |
106 | #define DEFINE_TESTOP(fn, op, prefix, postfix) \ | 106 | #define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \ |
107 | static __inline__ unsigned long fn( \ | 107 | static __inline__ unsigned long fn( \ |
108 | unsigned long mask, \ | 108 | unsigned long mask, \ |
109 | volatile unsigned long *_p) \ | 109 | volatile unsigned long *_p) \ |
110 | { \ | 110 | { \ |
111 | unsigned long old, t; \ | 111 | unsigned long old, t; \ |
112 | unsigned long *p = (unsigned long *)_p; \ | 112 | unsigned long *p = (unsigned long *)_p; \ |
113 | __asm__ __volatile__ ( \ | 113 | __asm__ __volatile__ ( \ |
114 | prefix \ | 114 | prefix \ |
115 | "1:" PPC_LLARX "%0,0,%3\n" \ | 115 | "1:" PPC_LLARX(%0,0,%3,eh) "\n" \ |
116 | stringify_in_c(op) "%1,%0,%2\n" \ | 116 | stringify_in_c(op) "%1,%0,%2\n" \ |
117 | PPC405_ERR77(0,%3) \ | 117 | PPC405_ERR77(0,%3) \ |
118 | PPC_STLCX "%1,0,%3\n" \ | 118 | PPC_STLCX "%1,0,%3\n" \ |
119 | "bne- 1b\n" \ | 119 | "bne- 1b\n" \ |
120 | postfix \ | 120 | postfix \ |
121 | : "=&r" (old), "=&r" (t) \ | 121 | : "=&r" (old), "=&r" (t) \ |
122 | : "r" (mask), "r" (p) \ | 122 | : "r" (mask), "r" (p) \ |
123 | : "cc", "memory"); \ | 123 | : "cc", "memory"); \ |
124 | return (old & mask); \ | 124 | return (old & mask); \ |
125 | } | 125 | } |
126 | 126 | ||
127 | DEFINE_TESTOP(test_and_set_bits, or, LWSYNC_ON_SMP, ISYNC_ON_SMP) | 127 | DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, |
128 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", ISYNC_ON_SMP) | 128 | PPC_ACQUIRE_BARRIER, 0) |
129 | DEFINE_TESTOP(test_and_clear_bits, andc, LWSYNC_ON_SMP, ISYNC_ON_SMP) | 129 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", |
130 | DEFINE_TESTOP(test_and_change_bits, xor, LWSYNC_ON_SMP, ISYNC_ON_SMP) | 130 | PPC_ACQUIRE_BARRIER, 1) |
131 | DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, | ||
132 | PPC_ACQUIRE_BARRIER, 0) | ||
133 | DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, | ||
134 | PPC_ACQUIRE_BARRIER, 0) | ||
131 | 135 | ||
132 | static __inline__ int test_and_set_bit(unsigned long nr, | 136 | static __inline__ int test_and_set_bit(unsigned long nr, |
133 | volatile unsigned long *addr) | 137 | volatile unsigned long *addr) |
@@ -158,7 +162,7 @@ static __inline__ int test_and_change_bit(unsigned long nr, | |||
158 | 162 | ||
159 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) | 163 | static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) |
160 | { | 164 | { |
161 | __asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory"); | 165 | __asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory"); |
162 | __clear_bit(nr, addr); | 166 | __clear_bit(nr, addr); |
163 | } | 167 | } |
164 | 168 | ||
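
Note: DEFINE_TESTOP() grows an eh argument that is forwarded to the larx hint; only test_and_set_bits_lock() passes 1, since its callers are taking a lock. A small usage sketch of the lock-flavoured pair (hypothetical caller; the wrappers themselves are standard kernel bitops):

    #include <linux/bitops.h>
    #include <asm/processor.h>

    static void my_bit_lock(unsigned long *word)
    {
            while (test_and_set_bit_lock(0, word))  /* acquire; lwarx/ldarx with EH=1 */
                    cpu_relax();
    }

    static void my_bit_unlock(unsigned long *word)
    {
            clear_bit_unlock(0, word);              /* release; PPC_RELEASE_BARRIER */
    }
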
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 80f315e8a421..abb833b0e58f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -381,9 +381,9 @@ extern const char *powerpc_base_platform; | |||
381 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) | 381 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) |
382 | 382 | ||
383 | /* 64-bit CPUs */ | 383 | /* 64-bit CPUs */ |
384 | #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 384 | #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \ |
385 | CPU_FTR_IABR | CPU_FTR_PPC_LE) | 385 | CPU_FTR_IABR | CPU_FTR_PPC_LE) |
386 | #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 386 | #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \ |
387 | CPU_FTR_IABR | \ | 387 | CPU_FTR_IABR | \ |
388 | CPU_FTR_MMCRA | CPU_FTR_CTRL) | 388 | CPU_FTR_MMCRA | CPU_FTR_CTRL) |
389 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 389 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 9696cc36d2dc..7c589ef81fb0 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | 12 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ |
13 | __asm__ __volatile ( \ | 13 | __asm__ __volatile ( \ |
14 | LWSYNC_ON_SMP \ | 14 | PPC_RELEASE_BARRIER \ |
15 | "1: lwarx %0,0,%2\n" \ | 15 | "1: lwarx %0,0,%2\n" \ |
16 | insn \ | 16 | insn \ |
17 | PPC405_ERR77(0, %2) \ | 17 | PPC405_ERR77(0, %2) \ |
@@ -90,14 +90,14 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
90 | return -EFAULT; | 90 | return -EFAULT; |
91 | 91 | ||
92 | __asm__ __volatile__ ( | 92 | __asm__ __volatile__ ( |
93 | LWSYNC_ON_SMP | 93 | PPC_RELEASE_BARRIER |
94 | "1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ | 94 | "1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ |
95 | cmpw 0,%0,%3\n\ | 95 | cmpw 0,%0,%3\n\ |
96 | bne- 3f\n" | 96 | bne- 3f\n" |
97 | PPC405_ERR77(0,%2) | 97 | PPC405_ERR77(0,%2) |
98 | "2: stwcx. %4,0,%2\n\ | 98 | "2: stwcx. %4,0,%2\n\ |
99 | bne- 1b\n" | 99 | bne- 1b\n" |
100 | ISYNC_ON_SMP | 100 | PPC_ACQUIRE_BARRIER |
101 | "3: .section .fixup,\"ax\"\n\ | 101 | "3: .section .fixup,\"ax\"\n\ |
102 | 4: li %0,%5\n\ | 102 | 4: li %0,%5\n\ |
103 | b 3b\n\ | 103 | b 3b\n\ |
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index fb3c05a0cbbf..3147a2970125 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -1 +1,29 @@ | |||
1 | #include <asm-generic/hardirq.h> | 1 | #ifndef _ASM_POWERPC_HARDIRQ_H |
2 | #define _ASM_POWERPC_HARDIRQ_H | ||
3 | |||
4 | #include <linux/threads.h> | ||
5 | #include <linux/irq.h> | ||
6 | |||
7 | typedef struct { | ||
8 | unsigned int __softirq_pending; | ||
9 | unsigned int timer_irqs; | ||
10 | unsigned int pmu_irqs; | ||
11 | unsigned int mce_exceptions; | ||
12 | unsigned int spurious_irqs; | ||
13 | } ____cacheline_aligned irq_cpustat_t; | ||
14 | |||
15 | DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | ||
16 | |||
17 | #define __ARCH_IRQ_STAT | ||
18 | |||
19 | #define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending | ||
20 | |||
21 | static inline void ack_bad_irq(unsigned int irq) | ||
22 | { | ||
23 | printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); | ||
24 | } | ||
25 | |||
26 | extern u64 arch_irq_stat_cpu(unsigned int cpu); | ||
27 | #define arch_irq_stat_cpu arch_irq_stat_cpu | ||
28 | |||
29 | #endif /* _ASM_POWERPC_HARDIRQ_H */ | ||
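
Note: replacing the asm-generic header with a powerpc-specific irq_cpustat_t is what allows timer, PMU, machine-check and spurious interrupts to be accounted per CPU. Elsewhere in this series the counters are bumped from the corresponding interrupt paths; the pattern is simply (sketch, using the per-CPU accessor of this kernel generation):

    /* e.g. from timer_interrupt(); each CPU only touches its own slot */
    static inline void account_timer_irq(void)
    {
            __get_cpu_var(irq_stat).timer_irqs++;
    }
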
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 84b457a3c1bc..ce58c80e1bcf 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -24,7 +24,7 @@ static __inline__ long local_add_return(long a, local_t *l) | |||
24 | long t; | 24 | long t; |
25 | 25 | ||
26 | __asm__ __volatile__( | 26 | __asm__ __volatile__( |
27 | "1:" PPC_LLARX "%0,0,%2 # local_add_return\n\ | 27 | "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\ |
28 | add %0,%1,%0\n" | 28 | add %0,%1,%0\n" |
29 | PPC405_ERR77(0,%2) | 29 | PPC405_ERR77(0,%2) |
30 | PPC_STLCX "%0,0,%2 \n\ | 30 | PPC_STLCX "%0,0,%2 \n\ |
@@ -43,7 +43,7 @@ static __inline__ long local_sub_return(long a, local_t *l) | |||
43 | long t; | 43 | long t; |
44 | 44 | ||
45 | __asm__ __volatile__( | 45 | __asm__ __volatile__( |
46 | "1:" PPC_LLARX "%0,0,%2 # local_sub_return\n\ | 46 | "1:" PPC_LLARX(%0,0,%2,0) " # local_sub_return\n\ |
47 | subf %0,%1,%0\n" | 47 | subf %0,%1,%0\n" |
48 | PPC405_ERR77(0,%2) | 48 | PPC405_ERR77(0,%2) |
49 | PPC_STLCX "%0,0,%2 \n\ | 49 | PPC_STLCX "%0,0,%2 \n\ |
@@ -60,7 +60,7 @@ static __inline__ long local_inc_return(local_t *l) | |||
60 | long t; | 60 | long t; |
61 | 61 | ||
62 | __asm__ __volatile__( | 62 | __asm__ __volatile__( |
63 | "1:" PPC_LLARX "%0,0,%1 # local_inc_return\n\ | 63 | "1:" PPC_LLARX(%0,0,%1,0) " # local_inc_return\n\ |
64 | addic %0,%0,1\n" | 64 | addic %0,%0,1\n" |
65 | PPC405_ERR77(0,%1) | 65 | PPC405_ERR77(0,%1) |
66 | PPC_STLCX "%0,0,%1 \n\ | 66 | PPC_STLCX "%0,0,%1 \n\ |
@@ -87,7 +87,7 @@ static __inline__ long local_dec_return(local_t *l) | |||
87 | long t; | 87 | long t; |
88 | 88 | ||
89 | __asm__ __volatile__( | 89 | __asm__ __volatile__( |
90 | "1:" PPC_LLARX "%0,0,%1 # local_dec_return\n\ | 90 | "1:" PPC_LLARX(%0,0,%1,0) " # local_dec_return\n\ |
91 | addic %0,%0,-1\n" | 91 | addic %0,%0,-1\n" |
92 | PPC405_ERR77(0,%1) | 92 | PPC405_ERR77(0,%1) |
93 | PPC_STLCX "%0,0,%1\n\ | 93 | PPC_STLCX "%0,0,%1\n\ |
@@ -117,7 +117,7 @@ static __inline__ int local_add_unless(local_t *l, long a, long u) | |||
117 | long t; | 117 | long t; |
118 | 118 | ||
119 | __asm__ __volatile__ ( | 119 | __asm__ __volatile__ ( |
120 | "1:" PPC_LLARX "%0,0,%1 # local_add_unless\n\ | 120 | "1:" PPC_LLARX(%0,0,%1,0) " # local_add_unless\n\ |
121 | cmpw 0,%0,%3 \n\ | 121 | cmpw 0,%0,%3 \n\ |
122 | beq- 2f \n\ | 122 | beq- 2f \n\ |
123 | add %0,%2,%0 \n" | 123 | add %0,%2,%0 \n" |
@@ -147,7 +147,7 @@ static __inline__ long local_dec_if_positive(local_t *l) | |||
147 | long t; | 147 | long t; |
148 | 148 | ||
149 | __asm__ __volatile__( | 149 | __asm__ __volatile__( |
150 | "1:" PPC_LLARX "%0,0,%1 # local_dec_if_positive\n\ | 150 | "1:" PPC_LLARX(%0,0,%1,0) " # local_dec_if_positive\n\ |
151 | cmpwi %0,1\n\ | 151 | cmpwi %0,1\n\ |
152 | addi %0,%0,-1\n\ | 152 | addi %0,%0,-1\n\ |
153 | blt- 2f\n" | 153 | blt- 2f\n" |
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index dabc01c727b8..5399f7e18102 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -15,7 +15,7 @@ static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new) | |||
15 | PPC405_ERR77(0,%1) | 15 | PPC405_ERR77(0,%1) |
16 | " stwcx. %3,0,%1\n\ | 16 | " stwcx. %3,0,%1\n\ |
17 | bne- 1b" | 17 | bne- 1b" |
18 | ISYNC_ON_SMP | 18 | PPC_ACQUIRE_BARRIER |
19 | "\n\ | 19 | "\n\ |
20 | 2:" | 20 | 2:" |
21 | : "=&r" (t) | 21 | : "=&r" (t) |
@@ -35,7 +35,7 @@ static inline int __mutex_dec_return_lock(atomic_t *v) | |||
35 | PPC405_ERR77(0,%1) | 35 | PPC405_ERR77(0,%1) |
36 | " stwcx. %0,0,%1\n\ | 36 | " stwcx. %0,0,%1\n\ |
37 | bne- 1b" | 37 | bne- 1b" |
38 | ISYNC_ON_SMP | 38 | PPC_ACQUIRE_BARRIER |
39 | : "=&r" (t) | 39 | : "=&r" (t) |
40 | : "r" (&v->counter) | 40 | : "r" (&v->counter) |
41 | : "cc", "memory"); | 41 | : "cc", "memory"); |
@@ -48,7 +48,7 @@ static inline int __mutex_inc_return_unlock(atomic_t *v) | |||
48 | int t; | 48 | int t; |
49 | 49 | ||
50 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
51 | LWSYNC_ON_SMP | 51 | PPC_RELEASE_BARRIER |
52 | "1: lwarx %0,0,%1 # mutex unlock\n\ | 52 | "1: lwarx %0,0,%1 # mutex unlock\n\ |
53 | addic %0,%0,1\n" | 53 | addic %0,%0,1\n" |
54 | PPC405_ERR77(0,%1) | 54 | PPC405_ERR77(0,%1) |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ef9aa84cac5a..aea714797590 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -22,8 +22,10 @@ | |||
22 | #define PPC_INST_DCBZL 0x7c2007ec | 22 | #define PPC_INST_DCBZL 0x7c2007ec |
23 | #define PPC_INST_ISEL 0x7c00001e | 23 | #define PPC_INST_ISEL 0x7c00001e |
24 | #define PPC_INST_ISEL_MASK 0xfc00003e | 24 | #define PPC_INST_ISEL_MASK 0xfc00003e |
25 | #define PPC_INST_LDARX 0x7c0000a8 | ||
25 | #define PPC_INST_LSWI 0x7c0004aa | 26 | #define PPC_INST_LSWI 0x7c0004aa |
26 | #define PPC_INST_LSWX 0x7c00042a | 27 | #define PPC_INST_LSWX 0x7c00042a |
28 | #define PPC_INST_LWARX 0x7c000029 | ||
27 | #define PPC_INST_LWSYNC 0x7c2004ac | 29 | #define PPC_INST_LWSYNC 0x7c2004ac |
28 | #define PPC_INST_LXVD2X 0x7c000698 | 30 | #define PPC_INST_LXVD2X 0x7c000698 |
29 | #define PPC_INST_MCRXR 0x7c000400 | 31 | #define PPC_INST_MCRXR 0x7c000400 |
@@ -55,15 +57,31 @@ | |||
55 | #define __PPC_RA(a) (((a) & 0x1f) << 16) | 57 | #define __PPC_RA(a) (((a) & 0x1f) << 16) |
56 | #define __PPC_RB(b) (((b) & 0x1f) << 11) | 58 | #define __PPC_RB(b) (((b) & 0x1f) << 11) |
57 | #define __PPC_RS(s) (((s) & 0x1f) << 21) | 59 | #define __PPC_RS(s) (((s) & 0x1f) << 21) |
60 | #define __PPC_RT(s) __PPC_RS(s) | ||
58 | #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) | 61 | #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) |
59 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) | 62 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) |
60 | #define __PPC_WC(w) (((w) & 0x3) << 21) | 63 | #define __PPC_WC(w) (((w) & 0x3) << 21) |
64 | /* | ||
65 | * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have | ||
66 | * any side effects on all 32bit processors, we can do this all the time. | ||
67 | */ | ||
68 | #ifdef CONFIG_PPC64 | ||
69 | #define __PPC_EH(eh) (((eh) & 0x1) << 0) | ||
70 | #else | ||
71 | #define __PPC_EH(eh) 0 | ||
72 | #endif | ||
61 | 73 | ||
62 | /* Deal with instructions that older assemblers aren't aware of */ | 74 | /* Deal with instructions that older assemblers aren't aware of */ |
63 | #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ | 75 | #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ |
64 | __PPC_RA(a) | __PPC_RB(b)) | 76 | __PPC_RA(a) | __PPC_RB(b)) |
65 | #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ | 77 | #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ |
66 | __PPC_RA(a) | __PPC_RB(b)) | 78 | __PPC_RA(a) | __PPC_RB(b)) |
79 | #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ | ||
80 | __PPC_RT(t) | __PPC_RA(a) | \ | ||
81 | __PPC_RB(b) | __PPC_EH(eh)) | ||
82 | #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ | ||
83 | __PPC_RT(t) | __PPC_RA(a) | \ | ||
84 | __PPC_RB(b) | __PPC_EH(eh)) | ||
67 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | 85 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ |
68 | __PPC_RB(b)) | 86 | __PPC_RB(b)) |
69 | #define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) | 87 | #define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) |
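
Note: PPC_LWARX/PPC_LDARX emit the instruction word through .long so the hint bit can be used even with assemblers that do not accept the extra operand, and __PPC_EH() compiles the hint away on 32-bit builds. A worked expansion using only the definitions above:

    /* PPC_LWARX(t, a, b, eh) stringifies to:
     *
     *   ".long 0x7c000029 | ((t) & 0x1f) << 21    // __PPC_RT
     *                     | ((a) & 0x1f) << 16    // __PPC_RA
     *                     | ((b) & 0x1f) << 11    // __PPC_RB
     *                     | ((eh) & 0x1)"         // __PPC_EH, forced to 0 on 32-bit
     *
     * which is how the spinlock code further down can write
     *   "1: " PPC_LWARX(%0, 0, %2, 1) "\n"
     * and get a hinted lwarx on 64-bit without upsetting older binutils.
     */
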
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 2828f9d0f66d..42fdff0e4b32 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -137,6 +137,11 @@ struct device_node * find_device_pe(struct device_node *dn); | |||
137 | void eeh_sysfs_add_device(struct pci_dev *pdev); | 137 | void eeh_sysfs_add_device(struct pci_dev *pdev); |
138 | void eeh_sysfs_remove_device(struct pci_dev *pdev); | 138 | void eeh_sysfs_remove_device(struct pci_dev *pdev); |
139 | 139 | ||
140 | static inline const char *eeh_pci_name(struct pci_dev *pdev) | ||
141 | { | ||
142 | return pdev ? pci_name(pdev) : "<null>"; | ||
143 | } | ||
144 | |||
140 | #endif /* CONFIG_EEH */ | 145 | #endif /* CONFIG_EEH */ |
141 | 146 | ||
142 | #else /* CONFIG_PCI */ | 147 | #else /* CONFIG_PCI */ |
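
Note: eeh_pci_name() is a NULL-safe wrapper around pci_name(), so EEH diagnostics do not have to guard every message. Hypothetical call site:

    printk(KERN_ERR "EEH: failure detected on %s\n", eeh_pci_name(pdev));
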
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 9eed29eee604..221ba6240464 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -161,9 +161,41 @@ struct thread_struct { | |||
161 | #ifdef CONFIG_PPC32 | 161 | #ifdef CONFIG_PPC32 |
162 | void *pgdir; /* root of page-table tree */ | 162 | void *pgdir; /* root of page-table tree */ |
163 | #endif | 163 | #endif |
164 | #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) | 164 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
165 | unsigned long dbcr0; /* debug control register values */ | 165 | /* |
166 | * The following help to manage the use of Debug Control Registers | ||
167 | * on the BookE platforms. | ||
168 | */ | ||
169 | unsigned long dbcr0; | ||
166 | unsigned long dbcr1; | 170 | unsigned long dbcr1; |
171 | #ifdef CONFIG_BOOKE | ||
172 | unsigned long dbcr2; | ||
173 | #endif | ||
174 | /* | ||
175 | * The stored value of the DBSR register will be the value at the | ||
176 | * last debug interrupt. This register can only be read from the | ||
177 | * user (will never be written to) and has value while helping to | ||
178 | * describe the reason for the last debug trap. Torez | ||
179 | */ | ||
180 | unsigned long dbsr; | ||
181 | /* | ||
182 | * The following will contain addresses used by debug applications | ||
183 | * to help trace and trap on particular address locations. | ||
184 | * The bits in the Debug Control Registers above help define which | ||
185 | * of the following registers will contain valid data and/or addresses. | ||
186 | */ | ||
187 | unsigned long iac1; | ||
188 | unsigned long iac2; | ||
189 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
190 | unsigned long iac3; | ||
191 | unsigned long iac4; | ||
192 | #endif | ||
193 | unsigned long dac1; | ||
194 | unsigned long dac2; | ||
195 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
196 | unsigned long dvc1; | ||
197 | unsigned long dvc2; | ||
198 | #endif | ||
167 | #endif | 199 | #endif |
168 | /* FP and VSX 0-31 register set */ | 200 | /* FP and VSX 0-31 register set */ |
169 | double fpr[32][TS_FPRWIDTH]; | 201 | double fpr[32][TS_FPRWIDTH]; |
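
Note: thread_struct now carries the whole BookE/4xx debug register file (DBCR0-2, DBSR, the IACs, DACs and optional DVCs), gated by the Kconfig counts introduced at the top of this diff. That lets context-switch and ptrace code be written against plain struct fields; a minimal sketch of a restore path, assuming the usual SPR names from reg_booke.h (hypothetical helper, not the patch's actual code):

    #include <asm/processor.h>
    #include <asm/reg.h>

    #ifdef CONFIG_PPC_ADV_DEBUG_REGS
    static void restore_debug_regs(struct thread_struct *thread)
    {
            mtspr(SPRN_IAC1, thread->iac1);
            mtspr(SPRN_IAC2, thread->iac2);
    #if CONFIG_PPC_ADV_DEBUG_IACS > 2
            mtspr(SPRN_IAC3, thread->iac3);
            mtspr(SPRN_IAC4, thread->iac4);
    #endif
            mtspr(SPRN_DAC1, thread->dac1);
            mtspr(SPRN_DAC2, thread->dac2);
            mtspr(SPRN_DBCR0, thread->dbcr0);
            mtspr(SPRN_DBCR1, thread->dbcr1);
    }
    #endif
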
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index cbd759e3cd78..b45108126562 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -24,6 +24,12 @@ | |||
24 | * 2 of the License, or (at your option) any later version. | 24 | * 2 of the License, or (at your option) any later version. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifdef __KERNEL__ | ||
28 | #include <linux/types.h> | ||
29 | #else | ||
30 | #include <stdint.h> | ||
31 | #endif | ||
32 | |||
27 | #ifndef __ASSEMBLY__ | 33 | #ifndef __ASSEMBLY__ |
28 | 34 | ||
29 | struct pt_regs { | 35 | struct pt_regs { |
@@ -294,4 +300,75 @@ extern void user_disable_single_step(struct task_struct *); | |||
294 | 300 | ||
295 | #define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */ | 301 | #define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */ |
296 | 302 | ||
303 | #define PPC_PTRACE_GETHWDBGINFO 0x89 | ||
304 | #define PPC_PTRACE_SETHWDEBUG 0x88 | ||
305 | #define PPC_PTRACE_DELHWDEBUG 0x87 | ||
306 | |||
307 | #ifndef __ASSEMBLY__ | ||
308 | |||
309 | struct ppc_debug_info { | ||
310 | uint32_t version; /* Only version 1 exists to date */ | ||
311 | uint32_t num_instruction_bps; | ||
312 | uint32_t num_data_bps; | ||
313 | uint32_t num_condition_regs; | ||
314 | uint32_t data_bp_alignment; | ||
315 | uint32_t sizeof_condition; /* size of the DVC register */ | ||
316 | uint64_t features; | ||
317 | }; | ||
318 | |||
319 | #endif /* __ASSEMBLY__ */ | ||
320 | |||
321 | /* | ||
322 | * features will have bits indicating whether there is support for: | ||
323 | */ | ||
324 | #define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x0000000000000001 | ||
325 | #define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002 | ||
326 | #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004 | ||
327 | #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008 | ||
328 | |||
329 | #ifndef __ASSEMBLY__ | ||
330 | |||
331 | struct ppc_hw_breakpoint { | ||
332 | uint32_t version; /* currently, version must be 1 */ | ||
333 | uint32_t trigger_type; /* only some combinations allowed */ | ||
334 | uint32_t addr_mode; /* address match mode */ | ||
335 | uint32_t condition_mode; /* break/watchpoint condition flags */ | ||
336 | uint64_t addr; /* break/watchpoint address */ | ||
337 | uint64_t addr2; /* range end or mask */ | ||
338 | uint64_t condition_value; /* contents of the DVC register */ | ||
339 | }; | ||
340 | |||
341 | #endif /* __ASSEMBLY__ */ | ||
342 | |||
343 | /* | ||
344 | * Trigger Type | ||
345 | */ | ||
346 | #define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x00000001 | ||
347 | #define PPC_BREAKPOINT_TRIGGER_READ 0x00000002 | ||
348 | #define PPC_BREAKPOINT_TRIGGER_WRITE 0x00000004 | ||
349 | #define PPC_BREAKPOINT_TRIGGER_RW \ | ||
350 | (PPC_BREAKPOINT_TRIGGER_READ | PPC_BREAKPOINT_TRIGGER_WRITE) | ||
351 | |||
352 | /* | ||
353 | * Address Mode | ||
354 | */ | ||
355 | #define PPC_BREAKPOINT_MODE_EXACT 0x00000000 | ||
356 | #define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x00000001 | ||
357 | #define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x00000002 | ||
358 | #define PPC_BREAKPOINT_MODE_MASK 0x00000003 | ||
359 | |||
360 | /* | ||
361 | * Condition Mode | ||
362 | */ | ||
363 | #define PPC_BREAKPOINT_CONDITION_MODE 0x00000003 | ||
364 | #define PPC_BREAKPOINT_CONDITION_NONE 0x00000000 | ||
365 | #define PPC_BREAKPOINT_CONDITION_AND 0x00000001 | ||
366 | #define PPC_BREAKPOINT_CONDITION_EXACT PPC_BREAKPOINT_CONDITION_AND | ||
367 | #define PPC_BREAKPOINT_CONDITION_OR 0x00000002 | ||
368 | #define PPC_BREAKPOINT_CONDITION_AND_OR 0x00000003 | ||
369 | #define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 | ||
370 | #define PPC_BREAKPOINT_CONDITION_BE_SHIFT 16 | ||
371 | #define PPC_BREAKPOINT_CONDITION_BE(n) \ | ||
372 | (1<<((n)+PPC_BREAKPOINT_CONDITION_BE_SHIFT)) | ||
373 | |||
297 | #endif /* _ASM_POWERPC_PTRACE_H */ | 374 | #endif /* _ASM_POWERPC_PTRACE_H */ |
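
Note: the three new requests plus ppc_debug_info/ppc_hw_breakpoint form the user ABI: PPC_PTRACE_GETHWDBGINFO reports the hardware's capabilities, PPC_PTRACE_SETHWDEBUG installs one breakpoint/watchpoint and returns a handle, PPC_PTRACE_DELHWDEBUG removes it. A hedged user-space sketch (assumes the kernel-side handlers from the rest of this series; error handling omitted):

    #include <stdint.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <asm/ptrace.h>

    /* Hypothetical debugger helper: exact-address write watchpoint. */
    static long set_write_watchpoint(pid_t child, uint64_t addr)
    {
            struct ppc_hw_breakpoint bp;

            memset(&bp, 0, sizeof(bp));
            bp.version        = 1;
            bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
            bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
            bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
            bp.addr           = addr;

            /* returns a handle for PPC_PTRACE_DELHWDEBUG, or a negative errno */
            return ptrace(PPC_PTRACE_SETHWDEBUG, child, 0, &bp);
    }
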
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 3bf783505528..8808d307fe7e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -248,6 +248,8 @@ | |||
248 | #define DBSR_RET 0x00008000 /* Return Debug Event */ | 248 | #define DBSR_RET 0x00008000 /* Return Debug Event */ |
249 | #define DBSR_CIRPT 0x00000040 /* Critical Interrupt Taken Event */ | 249 | #define DBSR_CIRPT 0x00000040 /* Critical Interrupt Taken Event */ |
250 | #define DBSR_CRET 0x00000020 /* Critical Return Debug Event */ | 250 | #define DBSR_CRET 0x00000020 /* Critical Return Debug Event */ |
251 | #define DBSR_IAC12ATS 0x00000002 /* Instr Address Compare 1/2 Toggle */ | ||
252 | #define DBSR_IAC34ATS 0x00000001 /* Instr Address Compare 3/4 Toggle */ | ||
251 | #endif | 253 | #endif |
252 | #ifdef CONFIG_40x | 254 | #ifdef CONFIG_40x |
253 | #define DBSR_IC 0x80000000 /* Instruction Completion */ | 255 | #define DBSR_IC 0x80000000 /* Instruction Completion */ |
@@ -313,6 +315,38 @@ | |||
313 | #define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ | 315 | #define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ |
314 | #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ | 316 | #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ |
315 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ | 317 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ |
318 | |||
319 | #define dbcr_iac_range(task) ((task)->thread.dbcr0) | ||
320 | #define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ | ||
321 | #define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ | ||
322 | #define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ | ||
323 | #define DBCR_IAC34I DBCR0_IA34 /* Range Inclusive */ | ||
324 | #define DBCR_IAC34X (DBCR0_IA34 | DBCR0_IA34X) /* Range Exclusive */ | ||
325 | #define DBCR_IAC34MODE (DBCR0_IA34 | DBCR0_IA34X) /* IAC 3-4 Mode Bits */ | ||
326 | |||
327 | /* Bit definitions related to the DBCR1. */ | ||
328 | #define DBCR1_DAC1R 0x80000000 /* DAC1 Read Debug Event */ | ||
329 | #define DBCR1_DAC2R 0x40000000 /* DAC2 Read Debug Event */ | ||
330 | #define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ | ||
331 | #define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ | ||
332 | |||
333 | #define dbcr_dac(task) ((task)->thread.dbcr1) | ||
334 | #define DBCR_DAC1R DBCR1_DAC1R | ||
335 | #define DBCR_DAC1W DBCR1_DAC1W | ||
336 | #define DBCR_DAC2R DBCR1_DAC2R | ||
337 | #define DBCR_DAC2W DBCR1_DAC2W | ||
338 | |||
339 | /* | ||
340 | * Are there any active Debug Events represented in the | ||
341 | * Debug Control Registers? | ||
342 | */ | ||
343 | #define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \ | ||
344 | DBCR0_IAC3 | DBCR0_IAC4) | ||
345 | #define DBCR1_ACTIVE_EVENTS (DBCR1_DAC1R | DBCR1_DAC2R | \ | ||
346 | DBCR1_DAC1W | DBCR1_DAC2W) | ||
347 | #define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ | ||
348 | ((dbcr1) & DBCR1_ACTIVE_EVENTS)) | ||
349 | |||
316 | #elif defined(CONFIG_BOOKE) | 350 | #elif defined(CONFIG_BOOKE) |
317 | #define DBCR0_EDM 0x80000000 /* External Debug Mode */ | 351 | #define DBCR0_EDM 0x80000000 /* External Debug Mode */ |
318 | #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ | 352 | #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ |
@@ -342,19 +376,79 @@ | |||
342 | #define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ | 376 | #define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ |
343 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ | 377 | #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ |
344 | 378 | ||
379 | #define dbcr_dac(task) ((task)->thread.dbcr0) | ||
380 | #define DBCR_DAC1R DBCR0_DAC1R | ||
381 | #define DBCR_DAC1W DBCR0_DAC1W | ||
382 | #define DBCR_DAC2R DBCR0_DAC2R | ||
383 | #define DBCR_DAC2W DBCR0_DAC2W | ||
384 | |||
345 | /* Bit definitions related to the DBCR1. */ | 385 | /* Bit definitions related to the DBCR1. */ |
386 | #define DBCR1_IAC1US 0xC0000000 /* Instr Addr Cmp 1 Sup/User */ | ||
387 | #define DBCR1_IAC1ER 0x30000000 /* Instr Addr Cmp 1 Eff/Real */ | ||
388 | #define DBCR1_IAC1ER_01 0x10000000 /* reserved */ | ||
389 | #define DBCR1_IAC1ER_10 0x20000000 /* Instr Addr Cmp 1 Eff/Real MSR[IS]=0 */ | ||
390 | #define DBCR1_IAC1ER_11 0x30000000 /* Instr Addr Cmp 1 Eff/Real MSR[IS]=1 */ | ||
391 | #define DBCR1_IAC2US 0x0C000000 /* Instr Addr Cmp 2 Sup/User */ | ||
392 | #define DBCR1_IAC2ER 0x03000000 /* Instr Addr Cmp 2 Eff/Real */ | ||
393 | #define DBCR1_IAC2ER_01 0x01000000 /* reserved */ | ||
394 | #define DBCR1_IAC2ER_10 0x02000000 /* Instr Addr Cmp 2 Eff/Real MSR[IS]=0 */ | ||
395 | #define DBCR1_IAC2ER_11 0x03000000 /* Instr Addr Cmp 2 Eff/Real MSR[IS]=1 */ | ||
346 | #define DBCR1_IAC12M 0x00800000 /* Instr Addr 1-2 range enable */ | 396 | #define DBCR1_IAC12M 0x00800000 /* Instr Addr 1-2 range enable */ |
347 | #define DBCR1_IAC12MX 0x00C00000 /* Instr Addr 1-2 range eXclusive */ | 397 | #define DBCR1_IAC12MX 0x00C00000 /* Instr Addr 1-2 range eXclusive */ |
348 | #define DBCR1_IAC12AT 0x00010000 /* Instr Addr 1-2 range Toggle */ | 398 | #define DBCR1_IAC12AT 0x00010000 /* Instr Addr 1-2 range Toggle */ |
399 | #define DBCR1_IAC3US 0x0000C000 /* Instr Addr Cmp 3 Sup/User */ | ||
400 | #define DBCR1_IAC3ER 0x00003000 /* Instr Addr Cmp 3 Eff/Real */ | ||
401 | #define DBCR1_IAC3ER_01 0x00001000 /* reserved */ | ||
402 | #define DBCR1_IAC3ER_10 0x00002000 /* Instr Addr Cmp 3 Eff/Real MSR[IS]=0 */ | ||
403 | #define DBCR1_IAC3ER_11 0x00003000 /* Instr Addr Cmp 3 Eff/Real MSR[IS]=1 */ | ||
404 | #define DBCR1_IAC4US 0x00000C00 /* Instr Addr Cmp 4 Sup/User */ | ||
405 | #define DBCR1_IAC4ER 0x00000300 /* Instr Addr Cmp 4 Eff/Real */ | ||
406 | #define DBCR1_IAC4ER_01 0x00000100 /* Instr Addr Cmp 4 Eff/Real MSR[IS]=0 */ | ||
407 | #define DBCR1_IAC4ER_10 0x00000200 /* Instr Addr Cmp 4 Eff/Real MSR[IS]=0 */ | ||
408 | #define DBCR1_IAC4ER_11 0x00000300 /* Instr Addr Cmp 4 Eff/Real MSR[IS]=1 */ | ||
349 | #define DBCR1_IAC34M 0x00000080 /* Instr Addr 3-4 range enable */ | 409 | #define DBCR1_IAC34M 0x00000080 /* Instr Addr 3-4 range enable */ |
350 | #define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ | 410 | #define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ |
351 | #define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ | 411 | #define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ |
352 | 412 | ||
413 | #define dbcr_iac_range(task) ((task)->thread.dbcr1) | ||
414 | #define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */ | ||
415 | #define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */ | ||
416 | #define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */ | ||
417 | #define DBCR_IAC34I DBCR1_IAC34M /* Range Inclusive */ | ||
418 | #define DBCR_IAC34X DBCR1_IAC34MX /* Range Exclusive */ | ||
419 | #define DBCR_IAC34MODE DBCR1_IAC34MX /* IAC 3-4 Mode Bits */ | ||
420 | |||
353 | /* Bit definitions related to the DBCR2. */ | 421 | /* Bit definitions related to the DBCR2. */ |
422 | #define DBCR2_DAC1US 0xC0000000 /* Data Addr Cmp 1 Sup/User */ | ||
423 | #define DBCR2_DAC1ER 0x30000000 /* Data Addr Cmp 1 Eff/Real */ | ||
424 | #define DBCR2_DAC2US 0x00000000 /* Data Addr Cmp 2 Sup/User */ | ||
425 | #define DBCR2_DAC2ER 0x00000000 /* Data Addr Cmp 2 Eff/Real */ | ||
354 | #define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */ | 426 | #define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */ |
427 | #define DBCR2_DAC12MM 0x00400000 /* DAC 1-2 Mask mode*/ | ||
355 | #define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */ | 428 | #define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */ |
429 | #define DBCR2_DAC12MODE 0x00C00000 /* DAC 1-2 Mode Bits */ | ||
356 | #define DBCR2_DAC12A 0x00200000 /* DAC 1-2 Asynchronous */ | 430 | #define DBCR2_DAC12A 0x00200000 /* DAC 1-2 Asynchronous */ |
357 | #endif | 431 | #define DBCR2_DVC1M 0x000C0000 /* Data Value Comp 1 Mode */ |
432 | #define DBCR2_DVC1M_SHIFT 18 /* # of bits to shift DBCR2_DVC1M */ | ||
433 | #define DBCR2_DVC2M 0x00030000 /* Data Value Comp 2 Mode */ | ||
434 | #define DBCR2_DVC2M_SHIFT 16 /* # of bits to shift DBCR2_DVC2M */ | ||
435 | #define DBCR2_DVC1BE 0x00000F00 /* Data Value Comp 1 Byte */ | ||
436 | #define DBCR2_DVC1BE_SHIFT 8 /* # of bits to shift DBCR2_DVC1BE */ | ||
437 | #define DBCR2_DVC2BE 0x0000000F /* Data Value Comp 2 Byte */ | ||
438 | #define DBCR2_DVC2BE_SHIFT 0 /* # of bits to shift DBCR2_DVC2BE */ | ||
439 | |||
440 | /* | ||
441 | * Are there any active Debug Events represented in the | ||
442 | * Debug Control Registers? | ||
443 | */ | ||
444 | #define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \ | ||
445 | DBCR0_IAC3 | DBCR0_IAC4 | DBCR0_DAC1R | \ | ||
446 | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W) | ||
447 | #define DBCR1_ACTIVE_EVENTS 0 | ||
448 | |||
449 | #define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ | ||
450 | ((dbcr1) & DBCR1_ACTIVE_EVENTS)) | ||
451 | #endif /* #elif defined(CONFIG_BOOKE) */ | ||
358 | 452 | ||
359 | /* Bit definitions related to the TCR. */ | 453 | /* Bit definitions related to the TCR. */ |
360 | #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ | 454 | #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ |
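
Note: dbcr_iac_range()/dbcr_dac() hide which DBCR register holds the IAC range bits and the DAC read/write enables on a given family, and DBCR_ACTIVE_EVENTS() answers whether any hardware debug event is still armed. A sketch of the intended use, modelled on the ptrace changes later in this series (hypothetical helper):

    /* Drop a DAC1 watchpoint; keep internal debug mode only if some
     * other event is still active.
     */
    static void clear_dac1_event(struct task_struct *child)
    {
            dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
            child->thread.dac1 = 0;

            if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, child->thread.dbcr1))
                    child->thread.dbcr0 &= ~DBCR0_IDM;
    }
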
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 764094cff681..f9611bd69ed2 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -27,6 +27,7 @@ | |||
27 | #endif | 27 | #endif |
28 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
29 | #include <asm/synch.h> | 29 | #include <asm/synch.h> |
30 | #include <asm/ppc-opcode.h> | ||
30 | 31 | ||
31 | #define arch_spin_is_locked(x) ((x)->slock != 0) | 32 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | 33 | ||
@@ -60,13 +61,14 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) | |||
60 | 61 | ||
61 | token = LOCK_TOKEN; | 62 | token = LOCK_TOKEN; |
62 | __asm__ __volatile__( | 63 | __asm__ __volatile__( |
63 | "1: lwarx %0,0,%2\n\ | 64 | "1: " PPC_LWARX(%0,0,%2,1) "\n\ |
64 | cmpwi 0,%0,0\n\ | 65 | cmpwi 0,%0,0\n\ |
65 | bne- 2f\n\ | 66 | bne- 2f\n\ |
66 | stwcx. %1,0,%2\n\ | 67 | stwcx. %1,0,%2\n\ |
67 | bne- 1b\n\ | 68 | bne- 1b\n" |
68 | isync\n\ | 69 | PPC_ACQUIRE_BARRIER |
69 | 2:" : "=&r" (tmp) | 70 | "2:" |
71 | : "=&r" (tmp) | ||
70 | : "r" (token), "r" (&lock->slock) | 72 | : "r" (token), "r" (&lock->slock) |
71 | : "cr0", "memory"); | 73 | : "cr0", "memory"); |
72 | 74 | ||
@@ -144,7 +146,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
144 | { | 146 | { |
145 | SYNC_IO; | 147 | SYNC_IO; |
146 | __asm__ __volatile__("# arch_spin_unlock\n\t" | 148 | __asm__ __volatile__("# arch_spin_unlock\n\t" |
147 | LWSYNC_ON_SMP: : :"memory"); | 149 | PPC_RELEASE_BARRIER: : :"memory"); |
148 | lock->slock = 0; | 150 | lock->slock = 0; |
149 | } | 151 | } |
150 | 152 | ||
@@ -186,15 +188,15 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) | |||
186 | long tmp; | 188 | long tmp; |
187 | 189 | ||
188 | __asm__ __volatile__( | 190 | __asm__ __volatile__( |
189 | "1: lwarx %0,0,%1\n" | 191 | "1: " PPC_LWARX(%0,0,%1,1) "\n" |
190 | __DO_SIGN_EXTEND | 192 | __DO_SIGN_EXTEND |
191 | " addic. %0,%0,1\n\ | 193 | " addic. %0,%0,1\n\ |
192 | ble- 2f\n" | 194 | ble- 2f\n" |
193 | PPC405_ERR77(0,%1) | 195 | PPC405_ERR77(0,%1) |
194 | " stwcx. %0,0,%1\n\ | 196 | " stwcx. %0,0,%1\n\ |
195 | bne- 1b\n\ | 197 | bne- 1b\n" |
196 | isync\n\ | 198 | PPC_ACQUIRE_BARRIER |
197 | 2:" : "=&r" (tmp) | 199 | "2:" : "=&r" (tmp) |
198 | : "r" (&rw->lock) | 200 | : "r" (&rw->lock) |
199 | : "cr0", "xer", "memory"); | 201 | : "cr0", "xer", "memory"); |
200 | 202 | ||
@@ -211,14 +213,14 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw) | |||
211 | 213 | ||
212 | token = WRLOCK_TOKEN; | 214 | token = WRLOCK_TOKEN; |
213 | __asm__ __volatile__( | 215 | __asm__ __volatile__( |
214 | "1: lwarx %0,0,%2\n\ | 216 | "1: " PPC_LWARX(%0,0,%2,1) "\n\ |
215 | cmpwi 0,%0,0\n\ | 217 | cmpwi 0,%0,0\n\ |
216 | bne- 2f\n" | 218 | bne- 2f\n" |
217 | PPC405_ERR77(0,%1) | 219 | PPC405_ERR77(0,%1) |
218 | " stwcx. %1,0,%2\n\ | 220 | " stwcx. %1,0,%2\n\ |
219 | bne- 1b\n\ | 221 | bne- 1b\n" |
220 | isync\n\ | 222 | PPC_ACQUIRE_BARRIER |
221 | 2:" : "=&r" (tmp) | 223 | "2:" : "=&r" (tmp) |
222 | : "r" (token), "r" (&rw->lock) | 224 | : "r" (token), "r" (&rw->lock) |
223 | : "cr0", "memory"); | 225 | : "cr0", "memory"); |
224 | 226 | ||
@@ -269,7 +271,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
269 | 271 | ||
270 | __asm__ __volatile__( | 272 | __asm__ __volatile__( |
271 | "# read_unlock\n\t" | 273 | "# read_unlock\n\t" |
272 | LWSYNC_ON_SMP | 274 | PPC_RELEASE_BARRIER |
273 | "1: lwarx %0,0,%1\n\ | 275 | "1: lwarx %0,0,%1\n\ |
274 | addic %0,%0,-1\n" | 276 | addic %0,%0,-1\n" |
275 | PPC405_ERR77(0,%1) | 277 | PPC405_ERR77(0,%1) |
@@ -283,7 +285,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
283 | static inline void arch_write_unlock(arch_rwlock_t *rw) | 285 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
284 | { | 286 | { |
285 | __asm__ __volatile__("# write_unlock\n\t" | 287 | __asm__ __volatile__("# write_unlock\n\t" |
286 | LWSYNC_ON_SMP: : :"memory"); | 288 | PPC_RELEASE_BARRIER: : :"memory"); |
287 | rw->lock = 0; | 289 | rw->lock = 0; |
288 | } | 290 | } |
289 | 291 | ||
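
Note: the lock-acquire paths now request the reservation with the EH=1 hint (PPC_LWARX(...,1)) and take the acquire barrier from synch.h instead of hard-coding isync, while the unlock paths keep PPC_RELEASE_BARRIER before the plain store. Condensed from the arch_spin_lock()/arch_spin_unlock() pair in this file (illustrative only):

    static inline void sketch_lock(arch_spinlock_t *lock)
    {
            while (__arch_spin_trylock(lock) != 0) /* hinted lwarx + PPC_ACQUIRE_BARRIER */
                    cpu_relax();
            /* critical section: accesses cannot move above the acquire barrier */
    }

    static inline void sketch_unlock(arch_spinlock_t *lock)
    {
            arch_spin_unlock(lock);                /* PPC_RELEASE_BARRIER, then slock = 0 */
    }
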
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 28f6ddbff4cf..d7cab44643c5 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -37,11 +37,15 @@ static inline void isync(void) | |||
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #ifdef CONFIG_SMP | 39 | #ifdef CONFIG_SMP |
40 | #define ISYNC_ON_SMP "\n\tisync\n" | 40 | #define __PPC_ACQUIRE_BARRIER \ |
41 | #define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n" | 41 | START_LWSYNC_SECTION(97); \ |
42 | isync; \ | ||
43 | MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); | ||
44 | #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) | ||
45 | #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" | ||
42 | #else | 46 | #else |
43 | #define ISYNC_ON_SMP | 47 | #define PPC_ACQUIRE_BARRIER |
44 | #define LWSYNC_ON_SMP | 48 | #define PPC_RELEASE_BARRIER |
45 | #endif | 49 | #endif |
46 | 50 | ||
47 | #endif /* __KERNEL__ */ | 51 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index bb8e006a47c6..a6297c67c3d6 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -112,8 +112,13 @@ static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | |||
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | extern int set_dabr(unsigned long dabr); | 114 | extern int set_dabr(unsigned long dabr); |
115 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
116 | extern void do_send_trap(struct pt_regs *regs, unsigned long address, | ||
117 | unsigned long error_code, int signal_code, int brkpt); | ||
118 | #else | ||
115 | extern void do_dabr(struct pt_regs *regs, unsigned long address, | 119 | extern void do_dabr(struct pt_regs *regs, unsigned long address, |
116 | unsigned long error_code); | 120 | unsigned long error_code); |
121 | #endif | ||
117 | extern void print_backtrace(unsigned long *); | 122 | extern void print_backtrace(unsigned long *); |
118 | extern void show_regs(struct pt_regs * regs); | 123 | extern void show_regs(struct pt_regs * regs); |
119 | extern void flush_instruction_cache(void); | 124 | extern void flush_instruction_cache(void); |
@@ -232,12 +237,12 @@ __xchg_u32(volatile void *p, unsigned long val) | |||
232 | unsigned long prev; | 237 | unsigned long prev; |
233 | 238 | ||
234 | __asm__ __volatile__( | 239 | __asm__ __volatile__( |
235 | LWSYNC_ON_SMP | 240 | PPC_RELEASE_BARRIER |
236 | "1: lwarx %0,0,%2 \n" | 241 | "1: lwarx %0,0,%2 \n" |
237 | PPC405_ERR77(0,%2) | 242 | PPC405_ERR77(0,%2) |
238 | " stwcx. %3,0,%2 \n\ | 243 | " stwcx. %3,0,%2 \n\ |
239 | bne- 1b" | 244 | bne- 1b" |
240 | ISYNC_ON_SMP | 245 | PPC_ACQUIRE_BARRIER |
241 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) | 246 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) |
242 | : "r" (p), "r" (val) | 247 | : "r" (p), "r" (val) |
243 | : "cc", "memory"); | 248 | : "cc", "memory"); |
@@ -275,12 +280,12 @@ __xchg_u64(volatile void *p, unsigned long val) | |||
275 | unsigned long prev; | 280 | unsigned long prev; |
276 | 281 | ||
277 | __asm__ __volatile__( | 282 | __asm__ __volatile__( |
278 | LWSYNC_ON_SMP | 283 | PPC_RELEASE_BARRIER |
279 | "1: ldarx %0,0,%2 \n" | 284 | "1: ldarx %0,0,%2 \n" |
280 | PPC405_ERR77(0,%2) | 285 | PPC405_ERR77(0,%2) |
281 | " stdcx. %3,0,%2 \n\ | 286 | " stdcx. %3,0,%2 \n\ |
282 | bne- 1b" | 287 | bne- 1b" |
283 | ISYNC_ON_SMP | 288 | PPC_ACQUIRE_BARRIER |
284 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) | 289 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) |
285 | : "r" (p), "r" (val) | 290 | : "r" (p), "r" (val) |
286 | : "cc", "memory"); | 291 | : "cc", "memory"); |
@@ -366,14 +371,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | |||
366 | unsigned int prev; | 371 | unsigned int prev; |
367 | 372 | ||
368 | __asm__ __volatile__ ( | 373 | __asm__ __volatile__ ( |
369 | LWSYNC_ON_SMP | 374 | PPC_RELEASE_BARRIER |
370 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | 375 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ |
371 | cmpw 0,%0,%3\n\ | 376 | cmpw 0,%0,%3\n\ |
372 | bne- 2f\n" | 377 | bne- 2f\n" |
373 | PPC405_ERR77(0,%2) | 378 | PPC405_ERR77(0,%2) |
374 | " stwcx. %4,0,%2\n\ | 379 | " stwcx. %4,0,%2\n\ |
375 | bne- 1b" | 380 | bne- 1b" |
376 | ISYNC_ON_SMP | 381 | PPC_ACQUIRE_BARRIER |
377 | "\n\ | 382 | "\n\ |
378 | 2:" | 383 | 2:" |
379 | : "=&r" (prev), "+m" (*p) | 384 | : "=&r" (prev), "+m" (*p) |
@@ -412,13 +417,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) | |||
412 | unsigned long prev; | 417 | unsigned long prev; |
413 | 418 | ||
414 | __asm__ __volatile__ ( | 419 | __asm__ __volatile__ ( |
415 | LWSYNC_ON_SMP | 420 | PPC_RELEASE_BARRIER |
416 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | 421 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ |
417 | cmpd 0,%0,%3\n\ | 422 | cmpd 0,%0,%3\n\ |
418 | bne- 2f\n\ | 423 | bne- 2f\n\ |
419 | stdcx. %4,0,%2\n\ | 424 | stdcx. %4,0,%2\n\ |
420 | bne- 1b" | 425 | bne- 1b" |
421 | ISYNC_ON_SMP | 426 | PPC_ACQUIRE_BARRIER |
422 | "\n\ | 427 | "\n\ |
423 | 2:" | 428 | 2:" |
424 | : "=&r" (prev), "+m" (*p) | 429 | : "=&r" (prev), "+m" (*p) |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9040330b0530..9ae77e52f9d3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -73,8 +73,10 @@ | |||
73 | #define CREATE_TRACE_POINTS | 73 | #define CREATE_TRACE_POINTS |
74 | #include <asm/trace.h> | 74 | #include <asm/trace.h> |
75 | 75 | ||
76 | DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | ||
77 | EXPORT_PER_CPU_SYMBOL(irq_stat); | ||
78 | |||
76 | int __irq_offset_value; | 79 | int __irq_offset_value; |
77 | static int ppc_spurious_interrupts; | ||
78 | 80 | ||
79 | #ifdef CONFIG_PPC32 | 81 | #ifdef CONFIG_PPC32 |
80 | EXPORT_SYMBOL(__irq_offset_value); | 82 | EXPORT_SYMBOL(__irq_offset_value); |
@@ -180,30 +182,64 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
180 | EXPORT_SYMBOL(raw_local_irq_restore); | 182 | EXPORT_SYMBOL(raw_local_irq_restore); |
181 | #endif /* CONFIG_PPC64 */ | 183 | #endif /* CONFIG_PPC64 */ |
182 | 184 | ||
185 | static int show_other_interrupts(struct seq_file *p, int prec) | ||
186 | { | ||
187 | int j; | ||
188 | |||
189 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
190 | if (tau_initialized) { | ||
191 | seq_printf(p, "%*s: ", prec, "TAU"); | ||
192 | for_each_online_cpu(j) | ||
193 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
194 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
195 | } | ||
196 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ | ||
197 | |||
198 | seq_printf(p, "%*s: ", prec, "LOC"); | ||
199 | for_each_online_cpu(j) | ||
200 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); | ||
201 | seq_printf(p, " Local timer interrupts\n"); | ||
202 | |||
203 | seq_printf(p, "%*s: ", prec, "SPU"); | ||
204 | for_each_online_cpu(j) | ||
205 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); | ||
206 | seq_printf(p, " Spurious interrupts\n"); | ||
207 | |||
208 | seq_printf(p, "%*s: ", prec, "CNT"); | ||
209 | for_each_online_cpu(j) | ||
210 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); | ||
211 | seq_printf(p, " Performance monitoring interrupts\n"); | ||
212 | |||
213 | seq_printf(p, "%*s: ", prec, "MCE"); | ||
214 | for_each_online_cpu(j) | ||
215 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); | ||
216 | seq_printf(p, " Machine check exceptions\n"); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
183 | int show_interrupts(struct seq_file *p, void *v) | 221 | int show_interrupts(struct seq_file *p, void *v) |
184 | { | 222 | { |
185 | int i = *(loff_t *)v, j; | 223 | unsigned long flags, any_count = 0; |
224 | int i = *(loff_t *) v, j, prec; | ||
186 | struct irqaction *action; | 225 | struct irqaction *action; |
187 | struct irq_desc *desc; | 226 | struct irq_desc *desc; |
188 | unsigned long flags; | ||
189 | 227 | ||
228 | if (i > nr_irqs) | ||
229 | return 0; | ||
230 | |||
231 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
232 | j *= 10; | ||
233 | |||
234 | if (i == nr_irqs) | ||
235 | return show_other_interrupts(p, prec); | ||
236 | |||
237 | /* print header */ | ||
190 | if (i == 0) { | 238 | if (i == 0) { |
191 | seq_puts(p, " "); | 239 | seq_printf(p, "%*s", prec + 8, ""); |
192 | for_each_online_cpu(j) | 240 | for_each_online_cpu(j) |
193 | seq_printf(p, "CPU%d ", j); | 241 | seq_printf(p, "CPU%-8d", j); |
194 | seq_putc(p, '\n'); | 242 | seq_putc(p, '\n'); |
195 | } else if (i == nr_irqs) { | ||
196 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
197 | if (tau_initialized){ | ||
198 | seq_puts(p, "TAU: "); | ||
199 | for_each_online_cpu(j) | ||
200 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
201 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
202 | } | ||
203 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ | ||
204 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | ||
205 | |||
206 | return 0; | ||
207 | } | 243 | } |
208 | 244 | ||
209 | desc = irq_to_desc(i); | 245 | desc = irq_to_desc(i); |
@@ -211,37 +247,48 @@ int show_interrupts(struct seq_file *p, void *v) | |||
211 | return 0; | 247 | return 0; |
212 | 248 | ||
213 | raw_spin_lock_irqsave(&desc->lock, flags); | 249 | raw_spin_lock_irqsave(&desc->lock, flags); |
214 | 250 | for_each_online_cpu(j) | |
251 | any_count |= kstat_irqs_cpu(i, j); | ||
215 | action = desc->action; | 252 | action = desc->action; |
216 | if (!action || !action->handler) | 253 | if (!action && !any_count) |
217 | goto skip; | 254 | goto out; |
218 | 255 | ||
219 | seq_printf(p, "%3d: ", i); | 256 | seq_printf(p, "%*d: ", prec, i); |
220 | #ifdef CONFIG_SMP | ||
221 | for_each_online_cpu(j) | 257 | for_each_online_cpu(j) |
222 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 258 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
223 | #else | ||
224 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
225 | #endif /* CONFIG_SMP */ | ||
226 | 259 | ||
227 | if (desc->chip) | 260 | if (desc->chip) |
228 | seq_printf(p, " %s ", desc->chip->name); | 261 | seq_printf(p, " %-16s", desc->chip->name); |
229 | else | 262 | else |
230 | seq_puts(p, " None "); | 263 | seq_printf(p, " %-16s", "None"); |
264 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
231 | 265 | ||
232 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); | 266 | if (action) { |
233 | seq_printf(p, " %s", action->name); | 267 | seq_printf(p, " %s", action->name); |
268 | while ((action = action->next) != NULL) | ||
269 | seq_printf(p, ", %s", action->name); | ||
270 | } | ||
234 | 271 | ||
235 | for (action = action->next; action; action = action->next) | ||
236 | seq_printf(p, ", %s", action->name); | ||
237 | seq_putc(p, '\n'); | 272 | seq_putc(p, '\n'); |
238 | 273 | out: | |
239 | skip: | ||
240 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 274 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
241 | |||
242 | return 0; | 275 | return 0; |
243 | } | 276 | } |
244 | 277 | ||
278 | /* | ||
279 | * /proc/stat helpers | ||
280 | */ | ||
281 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
282 | { | ||
283 | u64 sum = per_cpu(irq_stat, cpu).timer_irqs; | ||
284 | |||
285 | sum += per_cpu(irq_stat, cpu).pmu_irqs; | ||
286 | sum += per_cpu(irq_stat, cpu).mce_exceptions; | ||
287 | sum += per_cpu(irq_stat, cpu).spurious_irqs; | ||
288 | |||
289 | return sum; | ||
290 | } | ||
291 | |||
245 | #ifdef CONFIG_HOTPLUG_CPU | 292 | #ifdef CONFIG_HOTPLUG_CPU |
246 | void fixup_irqs(cpumask_t map) | 293 | void fixup_irqs(cpumask_t map) |
247 | { | 294 | { |
@@ -353,8 +400,7 @@ void do_IRQ(struct pt_regs *regs) | |||
353 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) | 400 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
354 | handle_one_irq(irq); | 401 | handle_one_irq(irq); |
355 | else if (irq != NO_IRQ_IGNORE) | 402 | else if (irq != NO_IRQ_IGNORE) |
356 | /* That's not SMP safe ... but who cares ? */ | 403 | __get_cpu_var(irq_stat).spurious_irqs++; |
357 | ppc_spurious_interrupts++; | ||
358 | 404 | ||
359 | irq_exit(); | 405 | irq_exit(); |
360 | set_irq_regs(old_regs); | 406 | set_irq_regs(old_regs); |
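The irq.c changes replace the global ppc_spurious_interrupts counter with a per-CPU irq_cpustat_t named irq_stat, whose timer_irqs, spurious_irqs, pmu_irqs and mce_exceptions fields are bumped at the individual exception entry points and summed by arch_irq_stat_cpu() for /proc/stat. A minimal sketch of that accounting model, with plain arrays standing in for DEFINE_PER_CPU and an arbitrary NR_CPUS:

/* Sketch of the per-CPU interrupt accounting added above; arrays stand in
 * for the kernel's per-CPU variables, and NR_CPUS is an arbitrary choice. */
#include <stdio.h>

#define NR_CPUS 4

struct irq_cpustat {			/* models the irq_cpustat_t fields used above */
	unsigned int timer_irqs;
	unsigned int spurious_irqs;
	unsigned int pmu_irqs;
	unsigned int mce_exceptions;
};

static struct irq_cpustat irq_stat[NR_CPUS];

/* mirrors arch_irq_stat_cpu(): total arch-specific interrupts on one CPU */
static unsigned long long arch_irq_stat_cpu(unsigned int cpu)
{
	unsigned long long sum = irq_stat[cpu].timer_irqs;

	sum += irq_stat[cpu].pmu_irqs;
	sum += irq_stat[cpu].mce_exceptions;
	sum += irq_stat[cpu].spurious_irqs;
	return sum;
}

int main(void)
{
	irq_stat[0].timer_irqs = 100;	/* e.g. timer_interrupt() increments */
	irq_stat[0].spurious_irqs = 2;	/* e.g. do_IRQ() on an unknown vector */
	printf("cpu0: %llu\n", arch_irq_stat_cpu(0));
	return 0;
}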
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index b6bd1eaa1c24..41bada0298c8 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -333,7 +333,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
333 | atomic_set(&kgdb_cpu_doing_single_step, -1); | 333 | atomic_set(&kgdb_cpu_doing_single_step, -1); |
334 | /* set the trace bit if we're stepping */ | 334 | /* set the trace bit if we're stepping */ |
335 | if (remcom_in_buffer[0] == 's') { | 335 | if (remcom_in_buffer[0] == 's') { |
336 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 336 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
337 | mtspr(SPRN_DBCR0, | 337 | mtspr(SPRN_DBCR0, |
338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
339 | linux_regs->msr |= MSR_DE; | 339 | linux_regs->msr |= MSR_DE; |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index c9329786073b..3fd1af902112 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | 38 | ||
39 | #ifdef CONFIG_BOOKE | 39 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
40 | #define MSR_SINGLESTEP (MSR_DE) | 40 | #define MSR_SINGLESTEP (MSR_DE) |
41 | #else | 41 | #else |
42 | #define MSR_SINGLESTEP (MSR_SE) | 42 | #define MSR_SINGLESTEP (MSR_SE) |
@@ -110,7 +110,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
110 | * like Decrementer or External Interrupt */ | 110 | * like Decrementer or External Interrupt */ |
111 | regs->msr &= ~MSR_EE; | 111 | regs->msr &= ~MSR_EE; |
112 | regs->msr |= MSR_SINGLESTEP; | 112 | regs->msr |= MSR_SINGLESTEP; |
113 | #ifdef CONFIG_BOOKE | 113 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
114 | regs->msr &= ~MSR_CE; | 114 | regs->msr &= ~MSR_CE; |
115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
116 | #endif | 116 | #endif |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7b816daf3eba..e4d71ced97ef 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -245,6 +245,24 @@ void discard_lazy_cpu_state(void) | |||
245 | } | 245 | } |
246 | #endif /* CONFIG_SMP */ | 246 | #endif /* CONFIG_SMP */ |
247 | 247 | ||
248 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
249 | void do_send_trap(struct pt_regs *regs, unsigned long address, | ||
250 | unsigned long error_code, int signal_code, int breakpt) | ||
251 | { | ||
252 | siginfo_t info; | ||
253 | |||
254 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | ||
255 | 11, SIGSEGV) == NOTIFY_STOP) | ||
256 | return; | ||
257 | |||
258 | /* Deliver the signal to userspace */ | ||
259 | info.si_signo = SIGTRAP; | ||
260 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ | ||
261 | info.si_code = signal_code; | ||
262 | info.si_addr = (void __user *)address; | ||
263 | force_sig_info(SIGTRAP, &info, current); | ||
264 | } | ||
265 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
248 | void do_dabr(struct pt_regs *regs, unsigned long address, | 266 | void do_dabr(struct pt_regs *regs, unsigned long address, |
249 | unsigned long error_code) | 267 | unsigned long error_code) |
250 | { | 268 | { |
@@ -257,12 +275,6 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
257 | if (debugger_dabr_match(regs)) | 275 | if (debugger_dabr_match(regs)) |
258 | return; | 276 | return; |
259 | 277 | ||
260 | /* Clear the DAC and struct entries. One shot trigger */ | ||
261 | #if defined(CONFIG_BOOKE) | ||
262 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W | ||
263 | | DBCR0_IDM)); | ||
264 | #endif | ||
265 | |||
266 | /* Clear the DABR */ | 278 | /* Clear the DABR */ |
267 | set_dabr(0); | 279 | set_dabr(0); |
268 | 280 | ||
@@ -273,9 +285,82 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
273 | info.si_addr = (void __user *)address; | 285 | info.si_addr = (void __user *)address; |
274 | force_sig_info(SIGTRAP, &info, current); | 286 | force_sig_info(SIGTRAP, &info, current); |
275 | } | 287 | } |
288 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
276 | 289 | ||
277 | static DEFINE_PER_CPU(unsigned long, current_dabr); | 290 | static DEFINE_PER_CPU(unsigned long, current_dabr); |
278 | 291 | ||
292 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
293 | /* | ||
294 | * Set the debug registers back to their default "safe" values. | ||
295 | */ | ||
296 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
297 | { | ||
298 | thread->iac1 = thread->iac2 = 0; | ||
299 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
300 | thread->iac3 = thread->iac4 = 0; | ||
301 | #endif | ||
302 | thread->dac1 = thread->dac2 = 0; | ||
303 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
304 | thread->dvc1 = thread->dvc2 = 0; | ||
305 | #endif | ||
306 | thread->dbcr0 = 0; | ||
307 | #ifdef CONFIG_BOOKE | ||
308 | /* | ||
309 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) | ||
310 | */ | ||
311 | thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \ | ||
312 | DBCR1_IAC3US | DBCR1_IAC4US; | ||
313 | /* | ||
314 | * Force Data Address Compare User/Supervisor bits to be User-only | ||
315 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. | ||
316 | */ | ||
317 | thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | ||
318 | #else | ||
319 | thread->dbcr1 = 0; | ||
320 | #endif | ||
321 | } | ||
322 | |||
323 | static void prime_debug_regs(struct thread_struct *thread) | ||
324 | { | ||
325 | mtspr(SPRN_IAC1, thread->iac1); | ||
326 | mtspr(SPRN_IAC2, thread->iac2); | ||
327 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
328 | mtspr(SPRN_IAC3, thread->iac3); | ||
329 | mtspr(SPRN_IAC4, thread->iac4); | ||
330 | #endif | ||
331 | mtspr(SPRN_DAC1, thread->dac1); | ||
332 | mtspr(SPRN_DAC2, thread->dac2); | ||
333 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
334 | mtspr(SPRN_DVC1, thread->dvc1); | ||
335 | mtspr(SPRN_DVC2, thread->dvc2); | ||
336 | #endif | ||
337 | mtspr(SPRN_DBCR0, thread->dbcr0); | ||
338 | mtspr(SPRN_DBCR1, thread->dbcr1); | ||
339 | #ifdef CONFIG_BOOKE | ||
340 | mtspr(SPRN_DBCR2, thread->dbcr2); | ||
341 | #endif | ||
342 | } | ||
343 | /* | ||
344 | * Unless neither the old or new thread are making use of the | ||
345 | * debug registers, set the debug registers from the values | ||
346 | * stored in the new thread. | ||
347 | */ | ||
348 | static void switch_booke_debug_regs(struct thread_struct *new_thread) | ||
349 | { | ||
350 | if ((current->thread.dbcr0 & DBCR0_IDM) | ||
351 | || (new_thread->dbcr0 & DBCR0_IDM)) | ||
352 | prime_debug_regs(new_thread); | ||
353 | } | ||
354 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
355 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
356 | { | ||
357 | if (thread->dabr) { | ||
358 | thread->dabr = 0; | ||
359 | set_dabr(0); | ||
360 | } | ||
361 | } | ||
362 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
363 | |||
279 | int set_dabr(unsigned long dabr) | 364 | int set_dabr(unsigned long dabr) |
280 | { | 365 | { |
281 | __get_cpu_var(current_dabr) = dabr; | 366 | __get_cpu_var(current_dabr) = dabr; |
@@ -284,7 +369,7 @@ int set_dabr(unsigned long dabr) | |||
284 | return ppc_md.set_dabr(dabr); | 369 | return ppc_md.set_dabr(dabr); |
285 | 370 | ||
286 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ | 371 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ |
287 | #if defined(CONFIG_BOOKE) | 372 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
288 | mtspr(SPRN_DAC1, dabr); | 373 | mtspr(SPRN_DAC1, dabr); |
289 | #elif defined(CONFIG_PPC_BOOK3S) | 374 | #elif defined(CONFIG_PPC_BOOK3S) |
290 | mtspr(SPRN_DABR, dabr); | 375 | mtspr(SPRN_DABR, dabr); |
@@ -371,10 +456,8 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
371 | 456 | ||
372 | #endif /* CONFIG_SMP */ | 457 | #endif /* CONFIG_SMP */ |
373 | 458 | ||
374 | #if defined(CONFIG_BOOKE) | 459 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
375 | /* If new thread DAC (HW breakpoint) is the same then leave it */ | 460 | switch_booke_debug_regs(&new->thread); |
376 | if (new->thread.dabr) | ||
377 | set_dabr(new->thread.dabr); | ||
378 | #else | 461 | #else |
379 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) | 462 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) |
380 | set_dabr(new->thread.dabr); | 463 | set_dabr(new->thread.dabr); |
@@ -514,7 +597,7 @@ void show_regs(struct pt_regs * regs) | |||
514 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 597 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
515 | trap = TRAP(regs); | 598 | trap = TRAP(regs); |
516 | if (trap == 0x300 || trap == 0x600) | 599 | if (trap == 0x300 || trap == 0x600) |
517 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 600 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
518 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 601 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
519 | #else | 602 | #else |
520 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 603 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); |
@@ -556,14 +639,7 @@ void flush_thread(void) | |||
556 | { | 639 | { |
557 | discard_lazy_cpu_state(); | 640 | discard_lazy_cpu_state(); |
558 | 641 | ||
559 | if (current->thread.dabr) { | 642 | set_debug_reg_defaults(¤t->thread); |
560 | current->thread.dabr = 0; | ||
561 | set_dabr(0); | ||
562 | |||
563 | #if defined(CONFIG_BOOKE) | ||
564 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); | ||
565 | #endif | ||
566 | } | ||
567 | } | 643 | } |
568 | 644 | ||
569 | void | 645 | void |
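switch_booke_debug_regs() above writes the debug SPRs only when the outgoing or incoming thread has DBCR0_IDM set, so context switches between threads that never touch the debug facilities avoid the mtspr traffic. A hedged sketch of that decision, with a mock thread structure and a printf standing in for prime_debug_regs():

/* Sketch of the switch_booke_debug_regs() decision above.  The thread
 * structure and the "prime" step are simplified stand-ins, not kernel code. */
#include <stdio.h>

#define DBCR0_IDM 0x40000000u		/* internal debug mode; value assumed */

struct thread {
	unsigned long dbcr0;
	unsigned long iac1, dac1;	/* small subset of the real debug state */
};

static void prime_debug_regs(const struct thread *t)
{
	/* in the kernel this is a series of mtspr() calls */
	printf("mtspr DBCR0=%#lx IAC1=%#lx DAC1=%#lx\n",
	       t->dbcr0, t->iac1, t->dac1);
}

static void switch_booke_debug_regs(const struct thread *prev,
				    const struct thread *next)
{
	/* only touch the SPRs if either thread is using debug events */
	if ((prev->dbcr0 & DBCR0_IDM) || (next->dbcr0 & DBCR0_IDM))
		prime_debug_regs(next);
}

int main(void)
{
	struct thread idle = { 0 }, traced = { DBCR0_IDM, 0x10000000, 0 };

	switch_booke_debug_regs(&idle, &traced);	/* primes the SPRs */
	switch_booke_debug_regs(&idle, &idle);		/* skipped entirely */
	return 0;
}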
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ef149880c145..d9b05866615f 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -46,7 +46,7 @@ | |||
46 | /* | 46 | /* |
47 | * Set of msr bits that gdb can change on behalf of a process. | 47 | * Set of msr bits that gdb can change on behalf of a process. |
48 | */ | 48 | */ |
49 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 49 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
50 | #define MSR_DEBUGCHANGE 0 | 50 | #define MSR_DEBUGCHANGE 0 |
51 | #else | 51 | #else |
52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) | 52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) |
@@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task) | |||
703 | struct pt_regs *regs = task->thread.regs; | 703 | struct pt_regs *regs = task->thread.regs; |
704 | 704 | ||
705 | if (regs != NULL) { | 705 | if (regs != NULL) { |
706 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 706 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
707 | task->thread.dbcr0 &= ~DBCR0_BT; | 707 | task->thread.dbcr0 &= ~DBCR0_BT; |
708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | 708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
709 | regs->msr |= MSR_DE; | 709 | regs->msr |= MSR_DE; |
@@ -720,7 +720,7 @@ void user_enable_block_step(struct task_struct *task) | |||
720 | struct pt_regs *regs = task->thread.regs; | 720 | struct pt_regs *regs = task->thread.regs; |
721 | 721 | ||
722 | if (regs != NULL) { | 722 | if (regs != NULL) { |
723 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 723 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
724 | task->thread.dbcr0 &= ~DBCR0_IC; | 724 | task->thread.dbcr0 &= ~DBCR0_IC; |
725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; | 725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; |
726 | regs->msr |= MSR_DE; | 726 | regs->msr |= MSR_DE; |
@@ -737,17 +737,25 @@ void user_disable_single_step(struct task_struct *task) | |||
737 | struct pt_regs *regs = task->thread.regs; | 737 | struct pt_regs *regs = task->thread.regs; |
738 | 738 | ||
739 | if (regs != NULL) { | 739 | if (regs != NULL) { |
740 | #if defined(CONFIG_BOOKE) | 740 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
741 | /* If DAC don't clear DBCRO_IDM or MSR_DE */ | 741 | /* |
742 | if (task->thread.dabr) | 742 | * The logic to disable single stepping should be as |
743 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT); | 743 | * simple as turning off the Instruction Complete flag. |
744 | else { | 744 | * And, after doing so, if all debug flags are off, turn |
745 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | 745 | * off DBCR0(IDM) and MSR(DE) .... Torez |
746 | */ | ||
747 | task->thread.dbcr0 &= ~DBCR0_IC; | ||
748 | /* | ||
749 | * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. | ||
750 | */ | ||
751 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, | ||
752 | task->thread.dbcr1)) { | ||
753 | /* | ||
754 | * All debug events were off..... | ||
755 | */ | ||
756 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
746 | regs->msr &= ~MSR_DE; | 757 | regs->msr &= ~MSR_DE; |
747 | } | 758 | } |
748 | #elif defined(CONFIG_40x) | ||
749 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | ||
750 | regs->msr &= ~MSR_DE; | ||
751 | #else | 759 | #else |
752 | regs->msr &= ~(MSR_SE | MSR_BE); | 760 | regs->msr &= ~(MSR_SE | MSR_BE); |
753 | #endif | 761 | #endif |
@@ -769,8 +777,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
769 | if ((data & ~0x7UL) >= TASK_SIZE) | 777 | if ((data & ~0x7UL) >= TASK_SIZE) |
770 | return -EIO; | 778 | return -EIO; |
771 | 779 | ||
772 | #ifndef CONFIG_BOOKE | 780 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS |
773 | |||
774 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. | 781 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. |
775 | * It was assumed, on previous implementations, that 3 bits were | 782 | * It was assumed, on previous implementations, that 3 bits were |
776 | * passed together with the data address, fitting the design of the | 783 | * passed together with the data address, fitting the design of the |
@@ -789,21 +796,22 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
789 | 796 | ||
790 | /* Move contents to the DABR register */ | 797 | /* Move contents to the DABR register */ |
791 | task->thread.dabr = data; | 798 | task->thread.dabr = data; |
792 | 799 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ | |
793 | #endif | ||
794 | #if defined(CONFIG_BOOKE) | ||
795 | |||
796 | /* As described above, it was assumed 3 bits were passed with the data | 800 | /* As described above, it was assumed 3 bits were passed with the data |
797 | * address, but we will assume only the mode bits will be passed | 801 | * address, but we will assume only the mode bits will be passed |
798 | * so as not to cause alignment restrictions for DAC-based processors. | 802 | * so as not to cause alignment restrictions for DAC-based processors. |
799 | */ | 803 | */ |
800 | 804 | ||
801 | /* DAC's hold the whole address without any mode flags */ | 805 | /* DAC's hold the whole address without any mode flags */ |
802 | task->thread.dabr = data & ~0x3UL; | 806 | task->thread.dac1 = data & ~0x3UL; |
803 | 807 | ||
804 | if (task->thread.dabr == 0) { | 808 | if (task->thread.dac1 == 0) { |
805 | task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM); | 809 | dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); |
806 | task->thread.regs->msr &= ~MSR_DE; | 810 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, |
811 | task->thread.dbcr1)) { | ||
812 | task->thread.regs->msr &= ~MSR_DE; | ||
813 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
814 | } | ||
807 | return 0; | 815 | return 0; |
808 | } | 816 | } |
809 | 817 | ||
@@ -814,17 +822,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
814 | 822 | ||
815 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 | 823 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 |
816 | register */ | 824 | register */ |
817 | task->thread.dbcr0 = DBCR0_IDM; | 825 | task->thread.dbcr0 |= DBCR0_IDM; |
818 | 826 | ||
819 | /* Check for write and read flags and set DBCR0 | 827 | /* Check for write and read flags and set DBCR0 |
820 | accordingly */ | 828 | accordingly */ |
829 | dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); | ||
821 | if (data & 0x1UL) | 830 | if (data & 0x1UL) |
822 | task->thread.dbcr0 |= DBSR_DAC1R; | 831 | dbcr_dac(task) |= DBCR_DAC1R; |
823 | if (data & 0x2UL) | 832 | if (data & 0x2UL) |
824 | task->thread.dbcr0 |= DBSR_DAC1W; | 833 | dbcr_dac(task) |= DBCR_DAC1W; |
825 | |||
826 | task->thread.regs->msr |= MSR_DE; | 834 | task->thread.regs->msr |= MSR_DE; |
827 | #endif | 835 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
828 | return 0; | 836 | return 0; |
829 | } | 837 | } |
830 | 838 | ||
@@ -839,6 +847,394 @@ void ptrace_disable(struct task_struct *child) | |||
839 | user_disable_single_step(child); | 847 | user_disable_single_step(child); |
840 | } | 848 | } |
841 | 849 | ||
850 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
850 | static long set_instruction_bp(struct task_struct *child, | ||
852 | struct ppc_hw_breakpoint *bp_info) | ||
853 | { | ||
854 | int slot; | ||
855 | int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); | ||
856 | int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); | ||
857 | int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); | ||
858 | int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); | ||
859 | |||
860 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
861 | slot2_in_use = 1; | ||
862 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
863 | slot4_in_use = 1; | ||
864 | |||
865 | if (bp_info->addr >= TASK_SIZE) | ||
866 | return -EIO; | ||
867 | |||
868 | if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { | ||
869 | |||
870 | /* Make sure range is valid. */ | ||
871 | if (bp_info->addr2 >= TASK_SIZE) | ||
872 | return -EIO; | ||
873 | |||
874 | /* We need a pair of IAC registers */ | ||
875 | if ((!slot1_in_use) && (!slot2_in_use)) { | ||
876 | slot = 1; | ||
877 | child->thread.iac1 = bp_info->addr; | ||
878 | child->thread.iac2 = bp_info->addr2; | ||
879 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
880 | if (bp_info->addr_mode == | ||
881 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
882 | dbcr_iac_range(child) |= DBCR_IAC12X; | ||
883 | else | ||
884 | dbcr_iac_range(child) |= DBCR_IAC12I; | ||
885 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
886 | } else if ((!slot3_in_use) && (!slot4_in_use)) { | ||
887 | slot = 3; | ||
888 | child->thread.iac3 = bp_info->addr; | ||
889 | child->thread.iac4 = bp_info->addr2; | ||
890 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
891 | if (bp_info->addr_mode == | ||
892 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
893 | dbcr_iac_range(child) |= DBCR_IAC34X; | ||
894 | else | ||
895 | dbcr_iac_range(child) |= DBCR_IAC34I; | ||
896 | #endif | ||
897 | } else | ||
898 | return -ENOSPC; | ||
899 | } else { | ||
900 | /* We only need one. If possible leave a pair free in | ||
901 | * case a range is needed later | ||
902 | */ | ||
903 | if (!slot1_in_use) { | ||
904 | /* | ||
905 | * Don't use iac1 if iac1-iac2 are free and either | ||
906 | * iac3 or iac4 (but not both) are free | ||
907 | */ | ||
908 | if (slot2_in_use || (slot3_in_use == slot4_in_use)) { | ||
909 | slot = 1; | ||
910 | child->thread.iac1 = bp_info->addr; | ||
911 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
912 | goto out; | ||
913 | } | ||
914 | } | ||
915 | if (!slot2_in_use) { | ||
916 | slot = 2; | ||
917 | child->thread.iac2 = bp_info->addr; | ||
918 | child->thread.dbcr0 |= DBCR0_IAC2; | ||
919 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
920 | } else if (!slot3_in_use) { | ||
921 | slot = 3; | ||
922 | child->thread.iac3 = bp_info->addr; | ||
923 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
924 | } else if (!slot4_in_use) { | ||
925 | slot = 4; | ||
926 | child->thread.iac4 = bp_info->addr; | ||
927 | child->thread.dbcr0 |= DBCR0_IAC4; | ||
928 | #endif | ||
929 | } else | ||
930 | return -ENOSPC; | ||
931 | } | ||
932 | out: | ||
933 | child->thread.dbcr0 |= DBCR0_IDM; | ||
934 | child->thread.regs->msr |= MSR_DE; | ||
935 | |||
936 | return slot; | ||
937 | } | ||
938 | |||
939 | static int del_instruction_bp(struct task_struct *child, int slot) | ||
940 | { | ||
941 | switch (slot) { | ||
942 | case 1: | ||
943 | if (child->thread.iac1 == 0) | ||
944 | return -ENOENT; | ||
945 | |||
946 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) { | ||
947 | /* address range - clear slots 1 & 2 */ | ||
948 | child->thread.iac2 = 0; | ||
949 | dbcr_iac_range(child) &= ~DBCR_IAC12MODE; | ||
950 | } | ||
951 | child->thread.iac1 = 0; | ||
952 | child->thread.dbcr0 &= ~DBCR0_IAC1; | ||
953 | break; | ||
954 | case 2: | ||
955 | if (child->thread.iac2 == 0) | ||
956 | return -ENOENT; | ||
957 | |||
958 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
959 | /* used in a range */ | ||
960 | return -EINVAL; | ||
961 | child->thread.iac2 = 0; | ||
962 | child->thread.dbcr0 &= ~DBCR0_IAC2; | ||
963 | break; | ||
964 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
965 | case 3: | ||
966 | if (child->thread.iac3 == 0) | ||
967 | return -ENOENT; | ||
968 | |||
969 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) { | ||
970 | /* address range - clear slots 3 & 4 */ | ||
971 | child->thread.iac4 = 0; | ||
972 | dbcr_iac_range(child) &= ~DBCR_IAC34MODE; | ||
973 | } | ||
974 | child->thread.iac3 = 0; | ||
975 | child->thread.dbcr0 &= ~DBCR0_IAC3; | ||
976 | break; | ||
977 | case 4: | ||
978 | if (child->thread.iac4 == 0) | ||
979 | return -ENOENT; | ||
980 | |||
981 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
982 | /* Used in a range */ | ||
983 | return -EINVAL; | ||
984 | child->thread.iac4 = 0; | ||
985 | child->thread.dbcr0 &= ~DBCR0_IAC4; | ||
986 | break; | ||
987 | #endif | ||
988 | default: | ||
989 | return -EINVAL; | ||
990 | } | ||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) | ||
995 | { | ||
996 | int byte_enable = | ||
997 | (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT) | ||
998 | & 0xf; | ||
999 | int condition_mode = | ||
1000 | bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE; | ||
1001 | int slot; | ||
1002 | |||
1003 | if (byte_enable && (condition_mode == 0)) | ||
1004 | return -EINVAL; | ||
1005 | |||
1006 | if (bp_info->addr >= TASK_SIZE) | ||
1007 | return -EIO; | ||
1008 | |||
1009 | if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) { | ||
1010 | slot = 1; | ||
1011 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1012 | dbcr_dac(child) |= DBCR_DAC1R; | ||
1013 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1014 | dbcr_dac(child) |= DBCR_DAC1W; | ||
1015 | child->thread.dac1 = (unsigned long)bp_info->addr; | ||
1016 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1017 | if (byte_enable) { | ||
1018 | child->thread.dvc1 = | ||
1019 | (unsigned long)bp_info->condition_value; | ||
1020 | child->thread.dbcr2 |= | ||
1021 | ((byte_enable << DBCR2_DVC1BE_SHIFT) | | ||
1022 | (condition_mode << DBCR2_DVC1M_SHIFT)); | ||
1023 | } | ||
1024 | #endif | ||
1025 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1026 | } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
1027 | /* Both dac1 and dac2 are part of a range */ | ||
1028 | return -ENOSPC; | ||
1029 | #endif | ||
1030 | } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) { | ||
1031 | slot = 2; | ||
1032 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1033 | dbcr_dac(child) |= DBCR_DAC2R; | ||
1034 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1035 | dbcr_dac(child) |= DBCR_DAC2W; | ||
1036 | child->thread.dac2 = (unsigned long)bp_info->addr; | ||
1037 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1038 | if (byte_enable) { | ||
1039 | child->thread.dvc2 = | ||
1040 | (unsigned long)bp_info->condition_value; | ||
1041 | child->thread.dbcr2 |= | ||
1042 | ((byte_enable << DBCR2_DVC2BE_SHIFT) | | ||
1043 | (condition_mode << DBCR2_DVC2M_SHIFT)); | ||
1044 | } | ||
1045 | #endif | ||
1046 | } else | ||
1047 | return -ENOSPC; | ||
1048 | child->thread.dbcr0 |= DBCR0_IDM; | ||
1049 | child->thread.regs->msr |= MSR_DE; | ||
1050 | |||
1051 | return slot + 4; | ||
1052 | } | ||
1053 | |||
1054 | static int del_dac(struct task_struct *child, int slot) | ||
1055 | { | ||
1056 | if (slot == 1) { | ||
1057 | if (child->thread.dac1 == 0) | ||
1058 | return -ENOENT; | ||
1059 | |||
1060 | child->thread.dac1 = 0; | ||
1061 | dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
1062 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1063 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
1064 | child->thread.dac2 = 0; | ||
1065 | child->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
1066 | } | ||
1067 | child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); | ||
1068 | #endif | ||
1069 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1070 | child->thread.dvc1 = 0; | ||
1071 | #endif | ||
1072 | } else if (slot == 2) { | ||
1073 | if (child->thread.dac1 == 0) | ||
1074 | return -ENOENT; | ||
1075 | |||
1076 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1077 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) | ||
1078 | /* Part of a range */ | ||
1079 | return -EINVAL; | ||
1080 | child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); | ||
1081 | #endif | ||
1082 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1083 | child->thread.dvc2 = 0; | ||
1084 | #endif | ||
1085 | child->thread.dac2 = 0; | ||
1086 | dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
1087 | } else | ||
1088 | return -EINVAL; | ||
1089 | |||
1090 | return 0; | ||
1091 | } | ||
1092 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1093 | |||
1094 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1095 | static int set_dac_range(struct task_struct *child, | ||
1096 | struct ppc_hw_breakpoint *bp_info) | ||
1097 | { | ||
1098 | int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK; | ||
1099 | |||
1100 | /* We don't allow range watchpoints to be used with DVC */ | ||
1101 | if (bp_info->condition_mode) | ||
1102 | return -EINVAL; | ||
1103 | |||
1104 | /* | ||
1105 | * Best effort to verify the address range. The user/supervisor bits | ||
1106 | * prevent trapping in kernel space, but let's fail on an obvious bad | ||
1107 | * range. The simple test on the mask is not fool-proof, and any | ||
1108 | * exclusive range will spill over into kernel space. | ||
1109 | */ | ||
1110 | if (bp_info->addr >= TASK_SIZE) | ||
1111 | return -EIO; | ||
1112 | if (mode == PPC_BREAKPOINT_MODE_MASK) { | ||
1113 | /* | ||
1114 | * dac2 is a bitmask. Don't allow a mask that makes a | ||
1115 | * kernel space address from a valid dac1 value | ||
1116 | */ | ||
1117 | if (~((unsigned long)bp_info->addr2) >= TASK_SIZE) | ||
1118 | return -EIO; | ||
1119 | } else { | ||
1120 | /* | ||
1121 | * For range breakpoints, addr2 must also be a valid address | ||
1122 | */ | ||
1123 | if (bp_info->addr2 >= TASK_SIZE) | ||
1124 | return -EIO; | ||
1125 | } | ||
1126 | |||
1127 | if (child->thread.dbcr0 & | ||
1128 | (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) | ||
1129 | return -ENOSPC; | ||
1130 | |||
1131 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1132 | child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); | ||
1133 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1134 | child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); | ||
1135 | child->thread.dac1 = bp_info->addr; | ||
1136 | child->thread.dac2 = bp_info->addr2; | ||
1137 | if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) | ||
1138 | child->thread.dbcr2 |= DBCR2_DAC12M; | ||
1139 | else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
1140 | child->thread.dbcr2 |= DBCR2_DAC12MX; | ||
1141 | else /* PPC_BREAKPOINT_MODE_MASK */ | ||
1142 | child->thread.dbcr2 |= DBCR2_DAC12MM; | ||
1143 | child->thread.regs->msr |= MSR_DE; | ||
1144 | |||
1145 | return 5; | ||
1146 | } | ||
1147 | #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */ | ||
1148 | |||
1149 | static long ppc_set_hwdebug(struct task_struct *child, | ||
1150 | struct ppc_hw_breakpoint *bp_info) | ||
1151 | { | ||
1152 | if (bp_info->version != 1) | ||
1153 | return -ENOTSUPP; | ||
1154 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1155 | /* | ||
1156 | * Check for invalid flags and combinations | ||
1157 | */ | ||
1158 | if ((bp_info->trigger_type == 0) || | ||
1159 | (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE | | ||
1160 | PPC_BREAKPOINT_TRIGGER_RW)) || | ||
1161 | (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) || | ||
1162 | (bp_info->condition_mode & | ||
1163 | ~(PPC_BREAKPOINT_CONDITION_MODE | | ||
1164 | PPC_BREAKPOINT_CONDITION_BE_ALL))) | ||
1165 | return -EINVAL; | ||
1166 | #if CONFIG_PPC_ADV_DEBUG_DVCS == 0 | ||
1167 | if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) | ||
1168 | return -EINVAL; | ||
1169 | #endif | ||
1170 | |||
1171 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) { | ||
1172 | if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || | ||
1173 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1174 | return -EINVAL; | ||
1175 | return set_instruction_bp(child, bp_info); | ||
1176 | } | ||
1177 | if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) | ||
1178 | return set_dac(child, bp_info); | ||
1179 | |||
1180 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1181 | return set_dac_range(child, bp_info); | ||
1182 | #else | ||
1183 | return -EINVAL; | ||
1184 | #endif | ||
1185 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1186 | /* | ||
1187 | * We only support one data breakpoint | ||
1188 | */ | ||
1189 | if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) || | ||
1190 | ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) || | ||
1191 | (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) || | ||
1192 | (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) || | ||
1193 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1194 | return -EINVAL; | ||
1195 | |||
1196 | if (child->thread.dabr) | ||
1197 | return -ENOSPC; | ||
1198 | |||
1199 | if ((unsigned long)bp_info->addr >= TASK_SIZE) | ||
1200 | return -EIO; | ||
1201 | |||
1202 | child->thread.dabr = (unsigned long)bp_info->addr; | ||
1203 | |||
1204 | return 1; | ||
1205 | #endif /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1206 | } | ||
1207 | |||
1208 | static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | ||
1209 | { | ||
1210 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1211 | int rc; | ||
1212 | |||
1213 | if (data <= 4) | ||
1214 | rc = del_instruction_bp(child, (int)data); | ||
1215 | else | ||
1216 | rc = del_dac(child, (int)data - 4); | ||
1217 | |||
1218 | if (!rc) { | ||
1219 | if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, | ||
1220 | child->thread.dbcr1)) { | ||
1221 | child->thread.dbcr0 &= ~DBCR0_IDM; | ||
1222 | child->thread.regs->msr &= ~MSR_DE; | ||
1223 | } | ||
1224 | } | ||
1225 | return rc; | ||
1226 | #else | ||
1227 | if (data != 1) | ||
1228 | return -EINVAL; | ||
1229 | if (child->thread.dabr == 0) | ||
1230 | return -ENOENT; | ||
1231 | |||
1232 | child->thread.dabr = 0; | ||
1233 | |||
1234 | return 0; | ||
1235 | #endif | ||
1236 | } | ||
1237 | |||
842 | /* | 1238 | /* |
843 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | 1239 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, |
844 | * we mark them as obsolete now, they will be removed in a future version | 1240 | * we mark them as obsolete now, they will be removed in a future version |
@@ -932,13 +1328,77 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
932 | break; | 1328 | break; |
933 | } | 1329 | } |
934 | 1330 | ||
1331 | case PPC_PTRACE_GETHWDBGINFO: { | ||
1332 | struct ppc_debug_info dbginfo; | ||
1333 | |||
1334 | dbginfo.version = 1; | ||
1335 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1336 | dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS; | ||
1337 | dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS; | ||
1338 | dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS; | ||
1339 | dbginfo.data_bp_alignment = 4; | ||
1340 | dbginfo.sizeof_condition = 4; | ||
1341 | dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE | | ||
1342 | PPC_DEBUG_FEATURE_INSN_BP_MASK; | ||
1343 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1344 | dbginfo.features |= | ||
1345 | PPC_DEBUG_FEATURE_DATA_BP_RANGE | | ||
1346 | PPC_DEBUG_FEATURE_DATA_BP_MASK; | ||
1347 | #endif | ||
1348 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1349 | dbginfo.num_instruction_bps = 0; | ||
1350 | dbginfo.num_data_bps = 1; | ||
1351 | dbginfo.num_condition_regs = 0; | ||
1352 | #ifdef CONFIG_PPC64 | ||
1353 | dbginfo.data_bp_alignment = 8; | ||
1354 | #else | ||
1355 | dbginfo.data_bp_alignment = 4; | ||
1356 | #endif | ||
1357 | dbginfo.sizeof_condition = 0; | ||
1358 | dbginfo.features = 0; | ||
1359 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1360 | |||
1361 | if (!access_ok(VERIFY_WRITE, data, | ||
1362 | sizeof(struct ppc_debug_info))) | ||
1363 | return -EFAULT; | ||
1364 | ret = __copy_to_user((struct ppc_debug_info __user *)data, | ||
1365 | &dbginfo, sizeof(struct ppc_debug_info)) ? | ||
1366 | -EFAULT : 0; | ||
1367 | break; | ||
1368 | } | ||
1369 | |||
1370 | case PPC_PTRACE_SETHWDEBUG: { | ||
1371 | struct ppc_hw_breakpoint bp_info; | ||
1372 | |||
1373 | if (!access_ok(VERIFY_READ, data, | ||
1374 | sizeof(struct ppc_hw_breakpoint))) | ||
1375 | return -EFAULT; | ||
1376 | ret = __copy_from_user(&bp_info, | ||
1377 | (struct ppc_hw_breakpoint __user *)data, | ||
1378 | sizeof(struct ppc_hw_breakpoint)) ? | ||
1379 | -EFAULT : 0; | ||
1380 | if (!ret) | ||
1381 | ret = ppc_set_hwdebug(child, &bp_info); | ||
1382 | break; | ||
1383 | } | ||
1384 | |||
1385 | case PPC_PTRACE_DELHWDEBUG: { | ||
1386 | ret = ppc_del_hwdebug(child, addr, data); | ||
1387 | break; | ||
1388 | } | ||
1389 | |||
935 | case PTRACE_GET_DEBUGREG: { | 1390 | case PTRACE_GET_DEBUGREG: { |
936 | ret = -EINVAL; | 1391 | ret = -EINVAL; |
937 | /* We only support one DABR and no IABRS at the moment */ | 1392 | /* We only support one DABR and no IABRS at the moment */ |
938 | if (addr > 0) | 1393 | if (addr > 0) |
939 | break; | 1394 | break; |
1395 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1396 | ret = put_user(child->thread.dac1, | ||
1397 | (unsigned long __user *)data); | ||
1398 | #else | ||
940 | ret = put_user(child->thread.dabr, | 1399 | ret = put_user(child->thread.dabr, |
941 | (unsigned long __user *)data); | 1400 | (unsigned long __user *)data); |
1401 | #endif | ||
942 | break; | 1402 | break; |
943 | } | 1403 | } |
944 | 1404 | ||
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 00b5078da9a3..a0afb555a7c9 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -140,17 +140,15 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) | |||
140 | return 0; /* no signals delivered */ | 140 | return 0; /* no signals delivered */ |
141 | } | 141 | } |
142 | 142 | ||
143 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | ||
143 | /* | 144 | /* |
144 | * Reenable the DABR before delivering the signal to | 145 | * Reenable the DABR before delivering the signal to |
145 | * user space. The DABR will have been cleared if it | 146 | * user space. The DABR will have been cleared if it |
146 | * triggered inside the kernel. | 147 | * triggered inside the kernel. |
147 | */ | 148 | */ |
148 | if (current->thread.dabr) { | 149 | if (current->thread.dabr) |
149 | set_dabr(current->thread.dabr); | 150 | set_dabr(current->thread.dabr); |
150 | #if defined(CONFIG_BOOKE) | ||
151 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
152 | #endif | 151 | #endif |
153 | } | ||
154 | 152 | ||
155 | if (is32) { | 153 | if (is32) { |
156 | if (ka.sa.sa_flags & SA_SIGINFO) | 154 | if (ka.sa.sa_flags & SA_SIGINFO) |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d670429a1608..266610119f66 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -1078,7 +1078,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1078 | int i; | 1078 | int i; |
1079 | unsigned char tmp; | 1079 | unsigned char tmp; |
1080 | unsigned long new_msr = regs->msr; | 1080 | unsigned long new_msr = regs->msr; |
1081 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1081 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1082 | unsigned long new_dbcr0 = current->thread.dbcr0; | 1082 | unsigned long new_dbcr0 = current->thread.dbcr0; |
1083 | #endif | 1083 | #endif |
1084 | 1084 | ||
@@ -1087,13 +1087,17 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1087 | return -EFAULT; | 1087 | return -EFAULT; |
1088 | switch (op.dbg_type) { | 1088 | switch (op.dbg_type) { |
1089 | case SIG_DBG_SINGLE_STEPPING: | 1089 | case SIG_DBG_SINGLE_STEPPING: |
1090 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1090 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1091 | if (op.dbg_value) { | 1091 | if (op.dbg_value) { |
1092 | new_msr |= MSR_DE; | 1092 | new_msr |= MSR_DE; |
1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); | 1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); |
1094 | } else { | 1094 | } else { |
1095 | new_msr &= ~MSR_DE; | 1095 | new_dbcr0 &= ~DBCR0_IC; |
1096 | new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); | 1096 | if (!DBCR_ACTIVE_EVENTS(new_dbcr0, |
1097 | current->thread.dbcr1)) { | ||
1098 | new_msr &= ~MSR_DE; | ||
1099 | new_dbcr0 &= ~DBCR0_IDM; | ||
1100 | } | ||
1097 | } | 1101 | } |
1098 | #else | 1102 | #else |
1099 | if (op.dbg_value) | 1103 | if (op.dbg_value) |
@@ -1103,7 +1107,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1103 | #endif | 1107 | #endif |
1104 | break; | 1108 | break; |
1105 | case SIG_DBG_BRANCH_TRACING: | 1109 | case SIG_DBG_BRANCH_TRACING: |
1106 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1110 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1107 | return -EINVAL; | 1111 | return -EINVAL; |
1108 | #else | 1112 | #else |
1109 | if (op.dbg_value) | 1113 | if (op.dbg_value) |
@@ -1124,7 +1128,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1124 | failure is a problem, anyway, and it's very unlikely unless | 1128 | failure is a problem, anyway, and it's very unlikely unless |
1125 | the user is really doing something wrong. */ | 1129 | the user is really doing something wrong. */ |
1126 | regs->msr = new_msr; | 1130 | regs->msr = new_msr; |
1127 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1131 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1128 | current->thread.dbcr0 = new_dbcr0; | 1132 | current->thread.dbcr0 = new_dbcr0; |
1129 | #endif | 1133 | #endif |
1130 | 1134 | ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ed1c0f58344a..1b16b9a3e49a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
575 | 575 | ||
576 | trace_timer_interrupt_entry(regs); | 576 | trace_timer_interrupt_entry(regs); |
577 | 577 | ||
578 | __get_cpu_var(irq_stat).timer_irqs++; | ||
579 | |||
578 | /* Ensure a positive value is written to the decrementer, or else | 580 | /* Ensure a positive value is written to the decrementer, or else |
579 | * some CPUs will continue to take decrementer exceptions */ | 581 | * some CPUs will continue to take decrementer exceptions */ |
580 | set_dec(DECREMENTER_MAX); | 582 | set_dec(DECREMENTER_MAX); |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 0a320dbd950a..987437e04e61 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -299,7 +299,7 @@ static inline int check_io_access(struct pt_regs *regs) | |||
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
302 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 302 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
303 | /* On 4xx, the reason for the machine check or program exception | 303 | /* On 4xx, the reason for the machine check or program exception |
304 | is in the ESR. */ | 304 | is in the ESR. */ |
305 | #define get_reason(regs) ((regs)->dsisr) | 305 | #define get_reason(regs) ((regs)->dsisr) |
@@ -483,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs) | |||
483 | { | 483 | { |
484 | int recover = 0; | 484 | int recover = 0; |
485 | 485 | ||
486 | __get_cpu_var(irq_stat).mce_exceptions++; | ||
487 | |||
486 | /* See if any machine dependent calls. In theory, we would want | 488 | /* See if any machine dependent calls. In theory, we would want |
487 | * to call the CPU first, and call the ppc_md. one if the CPU | 489 | * to call the CPU first, and call the ppc_md. one if the CPU |
488 | * one returns a positive number. However there is existing code | 490 | * one returns a positive number. However there is existing code |
@@ -965,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs) | |||
965 | 967 | ||
966 | void performance_monitor_exception(struct pt_regs *regs) | 968 | void performance_monitor_exception(struct pt_regs *regs) |
967 | { | 969 | { |
970 | __get_cpu_var(irq_stat).pmu_irqs++; | ||
971 | |||
968 | perf_irq(regs); | 972 | perf_irq(regs); |
969 | } | 973 | } |
970 | 974 | ||
@@ -1029,10 +1033,69 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1029 | } | 1033 | } |
1030 | #endif /* CONFIG_8xx */ | 1034 | #endif /* CONFIG_8xx */ |
1031 | 1035 | ||
1032 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 1036 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1037 | static void handle_debug(struct pt_regs *regs, unsigned long debug_status) | ||
1038 | { | ||
1039 | int changed = 0; | ||
1040 | /* | ||
1041 | * Determine the cause of the debug event, clear the | ||
1042 | * event flags and send a trap to the handler. Torez | ||
1043 | */ | ||
1044 | if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
1045 | dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
1046 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1047 | current->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
1048 | #endif | ||
1049 | do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, | ||
1050 | 5); | ||
1051 | changed |= 0x01; | ||
1052 | } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { | ||
1053 | dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
1054 | do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, | ||
1055 | 6); | ||
1056 | changed |= 0x01; | ||
1057 | } else if (debug_status & DBSR_IAC1) { | ||
1058 | current->thread.dbcr0 &= ~DBCR0_IAC1; | ||
1059 | dbcr_iac_range(current) &= ~DBCR_IAC12MODE; | ||
1060 | do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, | ||
1061 | 1); | ||
1062 | changed |= 0x01; | ||
1063 | } else if (debug_status & DBSR_IAC2) { | ||
1064 | current->thread.dbcr0 &= ~DBCR0_IAC2; | ||
1065 | do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, | ||
1066 | 2); | ||
1067 | changed |= 0x01; | ||
1068 | } else if (debug_status & DBSR_IAC3) { | ||
1069 | current->thread.dbcr0 &= ~DBCR0_IAC3; | ||
1070 | dbcr_iac_range(current) &= ~DBCR_IAC34MODE; | ||
1071 | do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, | ||
1072 | 3); | ||
1073 | changed |= 0x01; | ||
1074 | } else if (debug_status & DBSR_IAC4) { | ||
1075 | current->thread.dbcr0 &= ~DBCR0_IAC4; | ||
1076 | do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, | ||
1077 | 4); | ||
1078 | changed |= 0x01; | ||
1079 | } | ||
1080 | /* | ||
1081 | * At the point this routine was called, the MSR(DE) was turned off. | ||
1082 | * Check all other debug flags and see if that bit needs to be turned | ||
1083 | * back on or not. | ||
1084 | */ | ||
1085 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) | ||
1086 | regs->msr |= MSR_DE; | ||
1087 | else | ||
1088 | /* Make sure the IDM flag is off */ | ||
1089 | current->thread.dbcr0 &= ~DBCR0_IDM; | ||
1090 | |||
1091 | if (changed & 0x01) | ||
1092 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
1093 | } | ||
1033 | 1094 | ||
1034 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | 1095 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) |
1035 | { | 1096 | { |
1097 | current->thread.dbsr = debug_status; | ||
1098 | |||
1036 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while | 1099 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while |
1037 | * on server, it stops on the target of the branch. In order to simulate | 1100 | * on server, it stops on the target of the branch. In order to simulate |
1038 | * the server behaviour, we thus restart right away with a single step | 1101 | * the server behaviour, we thus restart right away with a single step |
@@ -1076,29 +1139,23 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | |||
1076 | if (debugger_sstep(regs)) | 1139 | if (debugger_sstep(regs)) |
1077 | return; | 1140 | return; |
1078 | 1141 | ||
1079 | if (user_mode(regs)) | ||
1080 | current->thread.dbcr0 &= ~(DBCR0_IC); | ||
1081 | |||
1082 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | ||
1083 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
1084 | regs->msr &= ~MSR_DE; | ||
1085 | |||
1086 | if (user_mode(regs)) { | 1142 | if (user_mode(regs)) { |
1087 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | | 1143 | current->thread.dbcr0 &= ~DBCR0_IC; |
1088 | DBCR0_IDM); | 1144 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1089 | } else { | 1145 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, |
1090 | /* Disable DAC interupts */ | 1146 | current->thread.dbcr1)) |
1091 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | | 1147 | regs->msr |= MSR_DE; |
1092 | DBSR_DAC1W | DBCR0_IDM)); | 1148 | else |
1093 | 1149 | /* Make sure the IDM bit is off */ | |
1094 | /* Clear the DAC event */ | 1150 | current->thread.dbcr0 &= ~DBCR0_IDM; |
1095 | mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W)); | 1151 | #endif |
1096 | } | 1152 | } |
1097 | /* Setup and send the trap to the handler */ | 1153 | |
1098 | do_dabr(regs, mfspr(SPRN_DAC1), debug_status); | 1154 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
1099 | } | 1155 | } else |
1156 | handle_debug(regs, debug_status); | ||
1100 | } | 1157 | } |
1101 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ | 1158 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
1102 | 1159 | ||
1103 | #if !defined(CONFIG_TAU_INT) | 1160 | #if !defined(CONFIG_TAU_INT) |
1104 | void TAUException(struct pt_regs *regs) | 1161 | void TAUException(struct pt_regs *regs) |
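handle_debug() and do_send_trap() above deliver a debug event to user space as a SIGTRAP whose siginfo carries si_code = TRAP_HWBKPT, si_errno = the breakpoint or watchpoint id (matching the slot numbering ppc_set_hwdebug() uses) and si_addr = the trigger address. A small tracer-side sketch of reading that back, assuming TRAP_HWBKPT is visible through <signal.h>:

/* Sketch: decoding the SIGTRAP generated by handle_debug()/do_send_trap().
 * Assumes TRAP_HWBKPT is exposed by <signal.h> on this system. */
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/ptrace.h>

void report_debug_trap(pid_t child)
{
	siginfo_t si;

	if (ptrace(PTRACE_GETSIGINFO, child, 0, &si) < 0)
		return;
	if (si.si_signo == SIGTRAP && si.si_code == TRAP_HWBKPT)
		printf("hw debug event %d hit at %p\n", si.si_errno, si.si_addr);
}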
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index e68beac0a171..4d4eeb900486 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S | |||
@@ -43,62 +43,62 @@ END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) | |||
43 | ld r7,16(r4) | 43 | ld r7,16(r4) |
44 | ldu r8,24(r4) | 44 | ldu r8,24(r4) |
45 | 1: std r5,8(r3) | 45 | 1: std r5,8(r3) |
46 | ld r9,8(r4) | ||
47 | std r6,16(r3) | 46 | std r6,16(r3) |
47 | ld r9,8(r4) | ||
48 | ld r10,16(r4) | 48 | ld r10,16(r4) |
49 | std r7,24(r3) | 49 | std r7,24(r3) |
50 | ld r11,24(r4) | ||
51 | std r8,32(r3) | 50 | std r8,32(r3) |
51 | ld r11,24(r4) | ||
52 | ld r12,32(r4) | 52 | ld r12,32(r4) |
53 | std r9,40(r3) | 53 | std r9,40(r3) |
54 | ld r5,40(r4) | ||
55 | std r10,48(r3) | 54 | std r10,48(r3) |
55 | ld r5,40(r4) | ||
56 | ld r6,48(r4) | 56 | ld r6,48(r4) |
57 | std r11,56(r3) | 57 | std r11,56(r3) |
58 | ld r7,56(r4) | ||
59 | std r12,64(r3) | 58 | std r12,64(r3) |
59 | ld r7,56(r4) | ||
60 | ld r8,64(r4) | 60 | ld r8,64(r4) |
61 | std r5,72(r3) | 61 | std r5,72(r3) |
62 | ld r9,72(r4) | ||
63 | std r6,80(r3) | 62 | std r6,80(r3) |
63 | ld r9,72(r4) | ||
64 | ld r10,80(r4) | 64 | ld r10,80(r4) |
65 | std r7,88(r3) | 65 | std r7,88(r3) |
66 | ld r11,88(r4) | ||
67 | std r8,96(r3) | 66 | std r8,96(r3) |
67 | ld r11,88(r4) | ||
68 | ld r12,96(r4) | 68 | ld r12,96(r4) |
69 | std r9,104(r3) | 69 | std r9,104(r3) |
70 | ld r5,104(r4) | ||
71 | std r10,112(r3) | 70 | std r10,112(r3) |
71 | ld r5,104(r4) | ||
72 | ld r6,112(r4) | 72 | ld r6,112(r4) |
73 | std r11,120(r3) | 73 | std r11,120(r3) |
74 | ld r7,120(r4) | ||
75 | stdu r12,128(r3) | 74 | stdu r12,128(r3) |
75 | ld r7,120(r4) | ||
76 | ldu r8,128(r4) | 76 | ldu r8,128(r4) |
77 | bdnz 1b | 77 | bdnz 1b |
78 | 78 | ||
79 | std r5,8(r3) | 79 | std r5,8(r3) |
80 | ld r9,8(r4) | ||
81 | std r6,16(r3) | 80 | std r6,16(r3) |
81 | ld r9,8(r4) | ||
82 | ld r10,16(r4) | 82 | ld r10,16(r4) |
83 | std r7,24(r3) | 83 | std r7,24(r3) |
84 | ld r11,24(r4) | ||
85 | std r8,32(r3) | 84 | std r8,32(r3) |
85 | ld r11,24(r4) | ||
86 | ld r12,32(r4) | 86 | ld r12,32(r4) |
87 | std r9,40(r3) | 87 | std r9,40(r3) |
88 | ld r5,40(r4) | ||
89 | std r10,48(r3) | 88 | std r10,48(r3) |
89 | ld r5,40(r4) | ||
90 | ld r6,48(r4) | 90 | ld r6,48(r4) |
91 | std r11,56(r3) | 91 | std r11,56(r3) |
92 | ld r7,56(r4) | ||
93 | std r12,64(r3) | 92 | std r12,64(r3) |
93 | ld r7,56(r4) | ||
94 | ld r8,64(r4) | 94 | ld r8,64(r4) |
95 | std r5,72(r3) | 95 | std r5,72(r3) |
96 | ld r9,72(r4) | ||
97 | std r6,80(r3) | 96 | std r6,80(r3) |
97 | ld r9,72(r4) | ||
98 | ld r10,80(r4) | 98 | ld r10,80(r4) |
99 | std r7,88(r3) | 99 | std r7,88(r3) |
100 | ld r11,88(r4) | ||
101 | std r8,96(r3) | 100 | std r8,96(r3) |
101 | ld r11,88(r4) | ||
102 | ld r12,96(r4) | 102 | ld r12,96(r4) |
103 | std r9,104(r3) | 103 | std r9,104(r3) |
104 | std r10,112(r3) | 104 | std r10,112(r3) |
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 693b14a778fa..578b625d6a3c 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -44,37 +44,55 @@ BEGIN_FTR_SECTION | |||
44 | andi. r0,r4,7 | 44 | andi. r0,r4,7 |
45 | bne .Lsrc_unaligned | 45 | bne .Lsrc_unaligned |
46 | END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | 46 | END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) |
47 | srdi r7,r5,4 | 47 | blt cr1,.Ldo_tail /* if < 16 bytes to copy */ |
48 | 20: ld r9,0(r4) | 48 | srdi r0,r5,5 |
49 | addi r4,r4,-8 | 49 | cmpdi cr1,r0,0 |
50 | mtctr r7 | 50 | 20: ld r7,0(r4) |
51 | andi. r5,r5,7 | 51 | 220: ld r6,8(r4) |
52 | bf cr7*4+0,22f | 52 | addi r4,r4,16 |
53 | addi r3,r3,8 | 53 | mtctr r0 |
54 | addi r4,r4,8 | 54 | andi. r0,r5,0x10 |
55 | mr r8,r9 | 55 | beq 22f |
56 | blt cr1,72f | 56 | addi r3,r3,16 |
57 | 21: ld r9,8(r4) | 57 | addi r4,r4,-16 |
58 | 70: std r8,8(r3) | 58 | mr r9,r7 |
59 | 22: ldu r8,16(r4) | 59 | mr r8,r6 |
60 | 71: stdu r9,16(r3) | 60 | beq cr1,72f |
61 | 21: ld r7,16(r4) | ||
62 | 221: ld r6,24(r4) | ||
63 | addi r4,r4,32 | ||
64 | 70: std r9,0(r3) | ||
65 | 270: std r8,8(r3) | ||
66 | 22: ld r9,0(r4) | ||
67 | 222: ld r8,8(r4) | ||
68 | 71: std r7,16(r3) | ||
69 | 271: std r6,24(r3) | ||
70 | addi r3,r3,32 | ||
61 | bdnz 21b | 71 | bdnz 21b |
62 | 72: std r8,8(r3) | 72 | 72: std r9,0(r3) |
73 | 272: std r8,8(r3) | ||
74 | andi. r5,r5,0xf | ||
63 | beq+ 3f | 75 | beq+ 3f |
64 | addi r3,r3,16 | 76 | addi r4,r4,16 |
65 | .Ldo_tail: | 77 | .Ldo_tail: |
66 | bf cr7*4+1,1f | 78 | addi r3,r3,16 |
67 | 23: lwz r9,8(r4) | 79 | bf cr7*4+0,246f |
80 | 244: ld r9,0(r4) | ||
81 | addi r4,r4,8 | ||
82 | 245: std r9,0(r3) | ||
83 | addi r3,r3,8 | ||
84 | 246: bf cr7*4+1,1f | ||
85 | 23: lwz r9,0(r4) | ||
68 | addi r4,r4,4 | 86 | addi r4,r4,4 |
69 | 73: stw r9,0(r3) | 87 | 73: stw r9,0(r3) |
70 | addi r3,r3,4 | 88 | addi r3,r3,4 |
71 | 1: bf cr7*4+2,2f | 89 | 1: bf cr7*4+2,2f |
72 | 44: lhz r9,8(r4) | 90 | 44: lhz r9,0(r4) |
73 | addi r4,r4,2 | 91 | addi r4,r4,2 |
74 | 74: sth r9,0(r3) | 92 | 74: sth r9,0(r3) |
75 | addi r3,r3,2 | 93 | addi r3,r3,2 |
76 | 2: bf cr7*4+3,3f | 94 | 2: bf cr7*4+3,3f |
77 | 45: lbz r9,8(r4) | 95 | 45: lbz r9,0(r4) |
78 | 75: stb r9,0(r3) | 96 | 75: stb r9,0(r3) |
79 | 3: li r3,0 | 97 | 3: li r3,0 |
80 | blr | 98 | blr |
@@ -220,7 +238,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
220 | 131: | 238 | 131: |
221 | addi r3,r3,8 | 239 | addi r3,r3,8 |
222 | 120: | 240 | 120: |
241 | 320: | ||
223 | 122: | 242 | 122: |
243 | 322: | ||
224 | 124: | 244 | 124: |
225 | 125: | 245 | 125: |
226 | 126: | 246 | 126: |
@@ -229,9 +249,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
229 | 129: | 249 | 129: |
230 | 133: | 250 | 133: |
231 | addi r3,r3,8 | 251 | addi r3,r3,8 |
232 | 121: | ||
233 | 132: | 252 | 132: |
234 | addi r3,r3,8 | 253 | addi r3,r3,8 |
254 | 121: | ||
255 | 321: | ||
256 | 344: | ||
235 | 134: | 257 | 134: |
236 | 135: | 258 | 135: |
237 | 138: | 259 | 138: |
@@ -303,18 +325,22 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
303 | 183: | 325 | 183: |
304 | add r3,r3,r7 | 326 | add r3,r3,r7 |
305 | b 1f | 327 | b 1f |
328 | 371: | ||
306 | 180: | 329 | 180: |
307 | addi r3,r3,8 | 330 | addi r3,r3,8 |
308 | 171: | 331 | 171: |
309 | 177: | 332 | 177: |
310 | addi r3,r3,8 | 333 | addi r3,r3,8 |
311 | 170: | 334 | 370: |
312 | 172: | 335 | 372: |
313 | 176: | 336 | 176: |
314 | 178: | 337 | 178: |
315 | addi r3,r3,4 | 338 | addi r3,r3,4 |
316 | 185: | 339 | 185: |
317 | addi r3,r3,4 | 340 | addi r3,r3,4 |
341 | 170: | ||
342 | 172: | ||
343 | 345: | ||
318 | 173: | 344 | 173: |
319 | 174: | 345 | 174: |
320 | 175: | 346 | 175: |
@@ -341,11 +367,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
341 | .section __ex_table,"a" | 367 | .section __ex_table,"a" |
342 | .align 3 | 368 | .align 3 |
343 | .llong 20b,120b | 369 | .llong 20b,120b |
370 | .llong 220b,320b | ||
344 | .llong 21b,121b | 371 | .llong 21b,121b |
372 | .llong 221b,321b | ||
345 | .llong 70b,170b | 373 | .llong 70b,170b |
374 | .llong 270b,370b | ||
346 | .llong 22b,122b | 375 | .llong 22b,122b |
376 | .llong 222b,322b | ||
347 | .llong 71b,171b | 377 | .llong 71b,171b |
378 | .llong 271b,371b | ||
348 | .llong 72b,172b | 379 | .llong 72b,172b |
380 | .llong 272b,372b | ||
381 | .llong 244b,344b | ||
382 | .llong 245b,345b | ||
349 | .llong 23b,123b | 383 | .llong 23b,123b |
350 | .llong 73b,173b | 384 | .llong 73b,173b |
351 | .llong 44b,144b | 385 | .llong 44b,144b |
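
[Editor's note] The copyuser_64.S rework above widens the aligned copy loop to 32 bytes per iteration and adds a matching __ex_table entry (the new 220b/320b, 270b/370b, 244b/344b pairs) for every load or store that can fault on user memory, so a fault lands in a fixup stub instead of oopsing. A rough C analogue of the byte-count breakdown, ignoring the software pipelining and the fault fixups (the function name and structure are illustrative only, not taken from the kernel source):

    #include <stddef.h>
    #include <string.h>

    /* Illustrative sketch: 32 bytes per main-loop pass, then the same
     * 16/8/4/2/1-byte tail cases the assembly selects via cr7 bits. */
    static void copy_unrolled(void *dst, const void *src, size_t len)
    {
            const char *s = src;
            char *d = dst;
            size_t n = len >> 5;    /* 32-byte iterations, like srdi r0,r5,5 */

            for (; n; n--, d += 32, s += 32)
                    memcpy(d, s, 32);               /* four doublewords per pass */
            if (len & 0x10) { memcpy(d, s, 16); d += 16; s += 16; }
            if (len & 0x08) { memcpy(d, s, 8);  d += 8;  s += 8;  }
            if (len & 0x04) { memcpy(d, s, 4);  d += 4;  s += 4;  }
            if (len & 0x02) { memcpy(d, s, 2);  d += 2;  s += 2;  }
            if (len & 0x01) { memcpy(d, s, 1); }
    }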
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 7e8865bcd683..4dee652dfcad 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
@@ -112,7 +112,7 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) | |||
112 | 112 | ||
113 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) | 113 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) |
114 | { | 114 | { |
115 | unsigned int *start, *end, *dest; | 115 | int *start, *end, *dest; |
116 | 116 | ||
117 | if (!(value & CPU_FTR_LWSYNC)) | 117 | if (!(value & CPU_FTR_LWSYNC)) |
118 | return ; | 118 | return ; |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 056d23a1b105..9e1aa4f99fac 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep) | |||
122 | unsigned long *word = &hptep->v; | 122 | unsigned long *word = &hptep->v; |
123 | 123 | ||
124 | while (1) { | 124 | while (1) { |
125 | if (!test_and_set_bit(HPTE_LOCK_BIT, word)) | 125 | if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) |
126 | break; | 126 | break; |
127 | while(test_bit(HPTE_LOCK_BIT, word)) | 127 | while(test_bit(HPTE_LOCK_BIT, word)) |
128 | cpu_relax(); | 128 | cpu_relax(); |
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep) | |||
133 | { | 133 | { |
134 | unsigned long *word = &hptep->v; | 134 | unsigned long *word = &hptep->v; |
135 | 135 | ||
136 | asm volatile("lwsync":::"memory"); | 136 | clear_bit_unlock(HPTE_LOCK_BIT, word); |
137 | clear_bit(HPTE_LOCK_BIT, word); | ||
138 | } | 137 | } |
139 | 138 | ||
140 | static long native_hpte_insert(unsigned long hpte_group, unsigned long va, | 139 | static long native_hpte_insert(unsigned long hpte_group, unsigned long va, |
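
[Editor's note] The hash_native_64.c hunks replace a plain test_and_set_bit() plus a hand-written lwsync-before-clear_bit() with the _lock/_unlock bitop variants, which carry the acquire/release ordering themselves. A minimal sketch of the resulting pattern, using the same kernel bitop APIs but a hypothetical lock word:

    #include <linux/bitops.h>
    #include <asm/processor.h>      /* cpu_relax() */

    #define MY_LOCK_BIT 0           /* hypothetical bit used as a spin-style lock */

    static inline void my_lock(unsigned long *word)
    {
            /* acquire semantics come from test_and_set_bit_lock() itself */
            while (test_and_set_bit_lock(MY_LOCK_BIT, word)) {
                    while (test_bit(MY_LOCK_BIT, word))
                            cpu_relax();            /* spin without hammering the bus */
            }
    }

    static inline void my_unlock(unsigned long *word)
    {
            /* release semantics; no explicit lwsync barrier needed */
            clear_bit_unlock(MY_LOCK_BIT, word);
    }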
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 282d9306361f..1ec06576f619 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
63 | if (huge) { | 63 | if (huge) { |
64 | #ifdef CONFIG_HUGETLB_PAGE | 64 | #ifdef CONFIG_HUGETLB_PAGE |
65 | psize = get_slice_psize(mm, addr); | 65 | psize = get_slice_psize(mm, addr); |
66 | /* Mask the address for the correct page size */ | ||
67 | addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); | ||
66 | #else | 68 | #else |
67 | BUG(); | 69 | BUG(); |
68 | psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ | 70 | psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ |
69 | #endif | 71 | #endif |
70 | } else | 72 | } else { |
71 | psize = pte_pagesize_index(mm, addr, pte); | 73 | psize = pte_pagesize_index(mm, addr, pte); |
74 | /* Mask the address for the standard page size. If we | ||
75 | * have a 64k page kernel, but the hardware does not | ||
76 | * support 64k pages, this might be different from the | ||
77 | * hardware page size encoded in the slice table. */ | ||
78 | addr &= PAGE_MASK; | ||
79 | } | ||
72 | 80 | ||
73 | /* Mask the address for the correct page size */ | ||
74 | addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); | ||
75 | 81 | ||
76 | /* Build full vaddr */ | 82 | /* Build full vaddr */ |
77 | if (!is_kernel_addr(addr)) { | 83 | if (!is_kernel_addr(addr)) { |
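
[Editor's note] The tlb_hash64.c hunk moves the address masking inside the branch: huge pages keep masking with the slice page-size shift, while normal pages now mask with PAGE_MASK, which can differ from the hardware page size when a 64k-page kernel runs on hardware that only backs 4k pages. The effect of the two masks is plain arithmetic; the shifts below are assumed values chosen purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0x12345678UL;
            unsigned long page_shift = 16;  /* assumed 64k PAGE_SIZE kernel */
            unsigned long huge_shift = 24;  /* assumed 16M size from mmu_psize_defs */

            unsigned long page_mask = ~((1UL << page_shift) - 1);  /* PAGE_MASK */
            unsigned long huge_mask = ~((1UL << huge_shift) - 1);

            printf("normal page: %#lx -> %#lx\n", addr, addr & page_mask);
            printf("huge page:   %#lx -> %#lx\n", addr, addr & huge_mask);
            return 0;
    }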
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index da9b20a63769..4ecf4cf9a51b 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | |||
@@ -79,7 +79,7 @@ cpld_unmask_irq(unsigned int irq) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | static struct irq_chip cpld_pic = { | 81 | static struct irq_chip cpld_pic = { |
82 | .name = " CPLD PIC ", | 82 | .name = "CPLD PIC", |
83 | .mask = cpld_mask_irq, | 83 | .mask = cpld_mask_irq, |
84 | .ack = cpld_mask_irq, | 84 | .ack = cpld_mask_irq, |
85 | .unmask = cpld_unmask_irq, | 85 | .unmask = cpld_unmask_irq, |
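
[Editor's note] This hunk and the similar ones below simply drop the padding spaces from irq_chip .name strings, leaving alignment to whatever prints them (e.g. /proc/interrupts). A minimal sketch of the convention, with made-up handlers and chip name:

    #include <linux/irq.h>

    /* hypothetical handlers, for illustration only */
    static void demo_mask_irq(unsigned int irq)   { /* mask the source */ }
    static void demo_unmask_irq(unsigned int irq) { /* unmask the source */ }

    static struct irq_chip demo_pic = {
            .name   = "DEMO-PIC",           /* no leading/trailing spaces */
            .mask   = demo_mask_irq,
            .unmask = demo_unmask_irq,
    };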
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index e5da5f62b24a..42e87f08aa01 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
@@ -232,7 +232,7 @@ static int socrates_fpga_pic_set_type(unsigned int virq, | |||
232 | } | 232 | } |
233 | 233 | ||
234 | static struct irq_chip socrates_fpga_pic_chip = { | 234 | static struct irq_chip socrates_fpga_pic_chip = { |
235 | .name = " FPGA-PIC ", | 235 | .name = "FPGA-PIC", |
236 | .ack = socrates_fpga_pic_ack, | 236 | .ack = socrates_fpga_pic_ack, |
237 | .mask = socrates_fpga_pic_mask, | 237 | .mask = socrates_fpga_pic_mask, |
238 | .mask_ack = socrates_fpga_pic_mask_ack, | 238 | .mask_ack = socrates_fpga_pic_mask_ack, |
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 36052a9ebcda..8d29290f6070 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c | |||
@@ -110,7 +110,7 @@ static void beatic_end_irq(unsigned int irq_plug) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | static struct irq_chip beatic_pic = { | 112 | static struct irq_chip beatic_pic = { |
113 | .name = " CELL-BEAT ", | 113 | .name = "CELL-BEAT", |
114 | .unmask = beatic_unmask_irq, | 114 | .unmask = beatic_unmask_irq, |
115 | .mask = beatic_mask_irq, | 115 | .mask = beatic_mask_irq, |
116 | .eoi = beatic_end_irq, | 116 | .eoi = beatic_end_irq, |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 6829cf7e2bda..10eb1a443626 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -88,7 +88,7 @@ static void iic_eoi(unsigned int irq) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | static struct irq_chip iic_chip = { | 90 | static struct irq_chip iic_chip = { |
91 | .name = " CELL-IIC ", | 91 | .name = "CELL-IIC", |
92 | .mask = iic_mask, | 92 | .mask = iic_mask, |
93 | .unmask = iic_unmask, | 93 | .unmask = iic_unmask, |
94 | .eoi = iic_eoi, | 94 | .eoi = iic_eoi, |
@@ -133,7 +133,7 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | |||
133 | 133 | ||
134 | 134 | ||
135 | static struct irq_chip iic_ioexc_chip = { | 135 | static struct irq_chip iic_ioexc_chip = { |
136 | .name = " CELL-IOEX", | 136 | .name = "CELL-IOEX", |
137 | .mask = iic_mask, | 137 | .mask = iic_mask, |
138 | .unmask = iic_unmask, | 138 | .unmask = iic_unmask, |
139 | .eoi = iic_ioexc_eoi, | 139 | .eoi = iic_ioexc_eoi, |
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 01244f254a11..5876e888e412 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -168,7 +168,7 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | static struct irq_chip spider_pic = { | 170 | static struct irq_chip spider_pic = { |
171 | .name = " SPIDER ", | 171 | .name = "SPIDER", |
172 | .unmask = spider_unmask_irq, | 172 | .unmask = spider_unmask_irq, |
173 | .mask = spider_mask_irq, | 173 | .mask = spider_mask_irq, |
174 | .ack = spider_ack_irq, | 174 | .ack = spider_ack_irq, |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 86c4b29eea89..ba446bf355a9 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -273,7 +273,7 @@ static void iseries_end_IRQ(unsigned int irq) | |||
273 | } | 273 | } |
274 | 274 | ||
275 | static struct irq_chip iseries_pic = { | 275 | static struct irq_chip iseries_pic = { |
276 | .name = "iSeries irq controller", | 276 | .name = "iSeries", |
277 | .startup = iseries_startup_IRQ, | 277 | .startup = iseries_startup_IRQ, |
278 | .shutdown = iseries_shutdown_IRQ, | 278 | .shutdown = iseries_shutdown_IRQ, |
279 | .unmask = iseries_enable_IRQ, | 279 | .unmask = iseries_enable_IRQ, |
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 09e827296276..3b62896f9a5b 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -195,7 +195,7 @@ static int pmac_retrigger(unsigned int virq) | |||
195 | } | 195 | } |
196 | 196 | ||
197 | static struct irq_chip pmac_pic = { | 197 | static struct irq_chip pmac_pic = { |
198 | .name = " PMAC-PIC ", | 198 | .name = "PMAC-PIC", |
199 | .startup = pmac_startup_irq, | 199 | .startup = pmac_startup_irq, |
200 | .mask = pmac_mask_irq, | 200 | .mask = pmac_mask_irq, |
201 | .ack = pmac_ack_irq, | 201 | .ack = pmac_ack_irq, |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index ccd8dd03b8c9..3304f32fc7b8 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -491,7 +491,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
491 | pdn->eeh_mode & EEH_MODE_NOCHECK) { | 491 | pdn->eeh_mode & EEH_MODE_NOCHECK) { |
492 | ignored_check++; | 492 | ignored_check++; |
493 | pr_debug("EEH: Ignored check (%x) for %s %s\n", | 493 | pr_debug("EEH: Ignored check (%x) for %s %s\n", |
494 | pdn->eeh_mode, pci_name (dev), dn->full_name); | 494 | pdn->eeh_mode, eeh_pci_name(dev), dn->full_name); |
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
@@ -515,7 +515,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
515 | printk (KERN_ERR "EEH: %d reads ignored for recovering device at " | 515 | printk (KERN_ERR "EEH: %d reads ignored for recovering device at " |
516 | "location=%s driver=%s pci addr=%s\n", | 516 | "location=%s driver=%s pci addr=%s\n", |
517 | pdn->eeh_check_count, location, | 517 | pdn->eeh_check_count, location, |
518 | dev->driver->name, pci_name(dev)); | 518 | dev->driver->name, eeh_pci_name(dev)); |
519 | printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n", | 519 | printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n", |
520 | dev->driver->name); | 520 | dev->driver->name); |
521 | dump_stack(); | 521 | dump_stack(); |
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index 0c252c3a5f9f..b8d70f5d9aa9 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c | |||
@@ -337,7 +337,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
337 | location = location ? location : "unknown"; | 337 | location = location ? location : "unknown"; |
338 | printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " | 338 | printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " |
339 | "for location=%s pci addr=%s\n", | 339 | "for location=%s pci addr=%s\n", |
340 | location, pci_name(event->dev)); | 340 | location, eeh_pci_name(event->dev)); |
341 | return NULL; | 341 | return NULL; |
342 | } | 342 | } |
343 | 343 | ||
@@ -368,7 +368,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
368 | pci_str = pci_name (frozen_pdn->pcidev); | 368 | pci_str = pci_name (frozen_pdn->pcidev); |
369 | drv_str = pcid_name (frozen_pdn->pcidev); | 369 | drv_str = pcid_name (frozen_pdn->pcidev); |
370 | } else { | 370 | } else { |
371 | pci_str = pci_name (event->dev); | 371 | pci_str = eeh_pci_name(event->dev); |
372 | drv_str = pcid_name (event->dev); | 372 | drv_str = pcid_name (event->dev); |
373 | } | 373 | } |
374 | 374 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c index ddb80f5d850b..ec5df8f519c7 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/platforms/pseries/eeh_event.c | |||
@@ -80,7 +80,7 @@ static int eeh_event_handler(void * dummy) | |||
80 | eeh_mark_slot(event->dn, EEH_MODE_RECOVERING); | 80 | eeh_mark_slot(event->dn, EEH_MODE_RECOVERING); |
81 | 81 | ||
82 | printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", | 82 | printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", |
83 | pci_name(event->dev)); | 83 | eeh_pci_name(event->dev)); |
84 | 84 | ||
85 | pdn = handle_eeh_events(event); | 85 | pdn = handle_eeh_events(event); |
86 | 86 | ||
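
[Editor's note] The eeh.c, eeh_driver.c and eeh_event.c hunks switch the diagnostics from pci_name() to eeh_pci_name(), presumably because an EEH event can refer to a slot with no struct pci_dev yet, and pci_name() cannot take NULL. The wrapper's definition is not shown in this diff; a sketch of what such a helper could look like, under that assumption:

    #include <linux/pci.h>

    /* Assumed shape of the helper -- not taken from this diff. */
    static inline const char *eeh_pci_name(struct pci_dev *pdev)
    {
            return pdev ? pci_name(pdev) : "n/a";
    }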
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index e9a52ec05a0f..4ca641042ec3 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -424,7 +424,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) | |||
424 | } | 424 | } |
425 | 425 | ||
426 | static struct irq_chip xics_pic_direct = { | 426 | static struct irq_chip xics_pic_direct = { |
427 | .name = " XICS ", | 427 | .name = "XICS", |
428 | .startup = xics_startup, | 428 | .startup = xics_startup, |
429 | .mask = xics_mask_irq, | 429 | .mask = xics_mask_irq, |
430 | .unmask = xics_unmask_irq, | 430 | .unmask = xics_unmask_irq, |
@@ -433,7 +433,7 @@ static struct irq_chip xics_pic_direct = { | |||
433 | }; | 433 | }; |
434 | 434 | ||
435 | static struct irq_chip xics_pic_lpar = { | 435 | static struct irq_chip xics_pic_lpar = { |
436 | .name = " XICS ", | 436 | .name = "XICS", |
437 | .startup = xics_startup, | 437 | .startup = xics_startup, |
438 | .mask = xics_mask_irq, | 438 | .mask = xics_mask_irq, |
439 | .unmask = xics_unmask_irq, | 439 | .unmask = xics_unmask_irq, |
@@ -510,15 +510,13 @@ static void __init xics_init_host(void) | |||
510 | /* | 510 | /* |
511 | * XICS only has a single IPI, so encode the messages per CPU | 511 | * XICS only has a single IPI, so encode the messages per CPU |
512 | */ | 512 | */ |
513 | struct xics_ipi_struct { | 513 | static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); |
514 | unsigned long value; | ||
515 | } ____cacheline_aligned; | ||
516 | |||
517 | static struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; | ||
518 | 514 | ||
519 | static inline void smp_xics_do_message(int cpu, int msg) | 515 | static inline void smp_xics_do_message(int cpu, int msg) |
520 | { | 516 | { |
521 | set_bit(msg, &xics_ipi_message[cpu].value); | 517 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); |
518 | |||
519 | set_bit(msg, tgt); | ||
522 | mb(); | 520 | mb(); |
523 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 521 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
524 | lpar_qirr_info(cpu, IPI_PRIORITY); | 522 | lpar_qirr_info(cpu, IPI_PRIORITY); |
@@ -544,25 +542,23 @@ void smp_xics_message_pass(int target, int msg) | |||
544 | 542 | ||
545 | static irqreturn_t xics_ipi_dispatch(int cpu) | 543 | static irqreturn_t xics_ipi_dispatch(int cpu) |
546 | { | 544 | { |
545 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
546 | |||
547 | WARN_ON(cpu_is_offline(cpu)); | 547 | WARN_ON(cpu_is_offline(cpu)); |
548 | 548 | ||
549 | mb(); /* order mmio clearing qirr */ | 549 | mb(); /* order mmio clearing qirr */ |
550 | while (xics_ipi_message[cpu].value) { | 550 | while (*tgt) { |
551 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, | 551 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { |
552 | &xics_ipi_message[cpu].value)) { | ||
553 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | 552 | smp_message_recv(PPC_MSG_CALL_FUNCTION); |
554 | } | 553 | } |
555 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, | 554 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { |
556 | &xics_ipi_message[cpu].value)) { | ||
557 | smp_message_recv(PPC_MSG_RESCHEDULE); | 555 | smp_message_recv(PPC_MSG_RESCHEDULE); |
558 | } | 556 | } |
559 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, | 557 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { |
560 | &xics_ipi_message[cpu].value)) { | ||
561 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | 558 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); |
562 | } | 559 | } |
563 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 560 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
564 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, | 561 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { |
565 | &xics_ipi_message[cpu].value)) { | ||
566 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | 562 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); |
567 | } | 563 | } |
568 | #endif | 564 | #endif |
@@ -784,9 +780,13 @@ static void xics_set_cpu_priority(unsigned char cppr) | |||
784 | { | 780 | { |
785 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | 781 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
786 | 782 | ||
787 | BUG_ON(os_cppr->index != 0); | 783 | /* |
784 | * we only really want to set the priority when there's | ||
785 | * just one cppr value on the stack | ||
786 | */ | ||
787 | WARN_ON(os_cppr->index != 0); | ||
788 | 788 | ||
789 | os_cppr->stack[os_cppr->index] = cppr; | 789 | os_cppr->stack[0] = cppr; |
790 | 790 | ||
791 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 791 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
792 | lpar_cppr_info(cppr); | 792 | lpar_cppr_info(cppr); |
@@ -821,8 +821,14 @@ void xics_setup_cpu(void) | |||
821 | 821 | ||
822 | void xics_teardown_cpu(void) | 822 | void xics_teardown_cpu(void) |
823 | { | 823 | { |
824 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
824 | int cpu = smp_processor_id(); | 825 | int cpu = smp_processor_id(); |
825 | 826 | ||
827 | /* | ||
828 | * we have to reset the cppr index to 0 because we're | ||
829 | * not going to return from the IPI | ||
830 | */ | ||
831 | os_cppr->index = 0; | ||
826 | xics_set_cpu_priority(0); | 832 | xics_set_cpu_priority(0); |
827 | 833 | ||
828 | /* Clear any pending IPI request */ | 834 | /* Clear any pending IPI request */ |
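
[Editor's note] The xics.c changes replace the static NR_CPUS array of cacheline-aligned structs with a DEFINE_PER_CPU_SHARED_ALIGNED unsigned long and operate on it through set_bit()/test_and_clear_bit(); per-CPU placement gives the same false-sharing protection without sizing for NR_CPUS. A stripped-down sketch of that per-cpu bitmask pattern, with made-up message numbers and no real interrupt delivery:

    #include <linux/percpu.h>
    #include <linux/bitops.h>

    #define DEMO_MSG_RESCHEDULE 0   /* illustrative message numbers */
    #define DEMO_MSG_CALL_FUNC  1

    static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, demo_ipi_message);

    static void demo_send_message(int cpu, int msg)
    {
            unsigned long *tgt = &per_cpu(demo_ipi_message, cpu);

            set_bit(msg, tgt);      /* atomic; visible to the receiving CPU */
            mb();                   /* order the flag before raising the IPI */
            /* ... poke the interrupt controller for 'cpu' here ... */
    }

    static void demo_receive_messages(int cpu)
    {
            unsigned long *tgt = &per_cpu(demo_ipi_message, cpu);

            while (*tgt) {
                    if (test_and_clear_bit(DEMO_MSG_RESCHEDULE, tgt))
                            /* handle reschedule */;
                    if (test_and_clear_bit(DEMO_MSG_CALL_FUNC, tgt))
                            /* handle call-function */;
            }
    }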
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index a4b41dbde128..ecad10d4e928 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c | |||
@@ -77,7 +77,7 @@ static void cpm_end_irq(unsigned int irq) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | static struct irq_chip cpm_pic = { | 79 | static struct irq_chip cpm_pic = { |
80 | .name = " CPM PIC ", | 80 | .name = "CPM PIC", |
81 | .mask = cpm_mask_irq, | 81 | .mask = cpm_mask_irq, |
82 | .unmask = cpm_unmask_irq, | 82 | .unmask = cpm_unmask_irq, |
83 | .eoi = cpm_end_irq, | 83 | .eoi = cpm_end_irq, |
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 1709ac5aac7c..fcea4ff825dd 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c | |||
@@ -198,7 +198,7 @@ err_sense: | |||
198 | } | 198 | } |
199 | 199 | ||
200 | static struct irq_chip cpm2_pic = { | 200 | static struct irq_chip cpm2_pic = { |
201 | .name = " CPM2 SIU ", | 201 | .name = "CPM2 SIU", |
202 | .mask = cpm2_mask_irq, | 202 | .mask = cpm2_mask_irq, |
203 | .unmask = cpm2_unmask_irq, | 203 | .unmask = cpm2_unmask_irq, |
204 | .ack = cpm2_ack, | 204 | .ack = cpm2_ack, |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index c6e11b077108..e094367d7739 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -47,7 +47,7 @@ static struct irq_chip fsl_msi_chip = { | |||
47 | .mask = mask_msi_irq, | 47 | .mask = mask_msi_irq, |
48 | .unmask = unmask_msi_irq, | 48 | .unmask = unmask_msi_irq, |
49 | .ack = fsl_msi_end_irq, | 49 | .ack = fsl_msi_end_irq, |
50 | .name = " FSL-MSI ", | 50 | .name = "FSL-MSI", |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, | 53 | static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, |
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 0a55db8a5a29..d32581764bde 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c | |||
@@ -135,7 +135,7 @@ static void i8259_unmask_irq(unsigned int irq_nr) | |||
135 | } | 135 | } |
136 | 136 | ||
137 | static struct irq_chip i8259_pic = { | 137 | static struct irq_chip i8259_pic = { |
138 | .name = " i8259 ", | 138 | .name = "i8259", |
139 | .mask = i8259_mask_irq, | 139 | .mask = i8259_mask_irq, |
140 | .disable = i8259_mask_irq, | 140 | .disable = i8259_mask_irq, |
141 | .unmask = i8259_unmask_irq, | 141 | .unmask = i8259_unmask_irq, |
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 28cdddd2f89e..16486716970a 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -660,7 +660,7 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
660 | 660 | ||
661 | /* level interrupts and edge interrupts have different ack operations */ | 661 | /* level interrupts and edge interrupts have different ack operations */ |
662 | static struct irq_chip ipic_level_irq_chip = { | 662 | static struct irq_chip ipic_level_irq_chip = { |
663 | .name = " IPIC ", | 663 | .name = "IPIC", |
664 | .unmask = ipic_unmask_irq, | 664 | .unmask = ipic_unmask_irq, |
665 | .mask = ipic_mask_irq, | 665 | .mask = ipic_mask_irq, |
666 | .mask_ack = ipic_mask_irq, | 666 | .mask_ack = ipic_mask_irq, |
@@ -668,7 +668,7 @@ static struct irq_chip ipic_level_irq_chip = { | |||
668 | }; | 668 | }; |
669 | 669 | ||
670 | static struct irq_chip ipic_edge_irq_chip = { | 670 | static struct irq_chip ipic_edge_irq_chip = { |
671 | .name = " IPIC ", | 671 | .name = "IPIC", |
672 | .unmask = ipic_unmask_irq, | 672 | .unmask = ipic_unmask_irq, |
673 | .mask = ipic_mask_irq, | 673 | .mask = ipic_mask_irq, |
674 | .mask_ack = ipic_mask_irq_and_ack, | 674 | .mask_ack = ipic_mask_irq_and_ack, |
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index 69bd6f4dff83..8c27d261aba8 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c | |||
@@ -94,7 +94,7 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static struct irq_chip mpc8xx_pic = { | 96 | static struct irq_chip mpc8xx_pic = { |
97 | .name = " MPC8XX SIU ", | 97 | .name = "MPC8XX SIU", |
98 | .unmask = mpc8xx_unmask_irq, | 98 | .unmask = mpc8xx_unmask_irq, |
99 | .mask = mpc8xx_mask_irq, | 99 | .mask = mpc8xx_mask_irq, |
100 | .ack = mpc8xx_ack, | 100 | .ack = mpc8xx_ack, |
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 0f6ab06f8474..3b6a9a43718f 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c | |||
@@ -60,7 +60,7 @@ static struct irq_chip mpic_pasemi_msi_chip = { | |||
60 | .eoi = mpic_end_irq, | 60 | .eoi = mpic_end_irq, |
61 | .set_type = mpic_set_irq_type, | 61 | .set_type = mpic_set_irq_type, |
62 | .set_affinity = mpic_set_affinity, | 62 | .set_affinity = mpic_set_affinity, |
63 | .name = "PASEMI-MSI ", | 63 | .name = "PASEMI-MSI", |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) | 66 | static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) |
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index b6bd775d2e22..31acd3b1718b 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mv643xx.h> | 16 | #include <linux/mv643xx.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <linux/dma-mapping.h> | ||
19 | 20 | ||
20 | #include <asm/prom.h> | 21 | #include <asm/prom.h> |
21 | 22 | ||
@@ -189,6 +190,7 @@ static int __init mv64x60_mpsc_device_setup(struct device_node *np, int id) | |||
189 | pdev = platform_device_alloc(MPSC_CTLR_NAME, port_number); | 190 | pdev = platform_device_alloc(MPSC_CTLR_NAME, port_number); |
190 | if (!pdev) | 191 | if (!pdev) |
191 | return -ENOMEM; | 192 | return -ENOMEM; |
193 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
192 | 194 | ||
193 | err = platform_device_add_resources(pdev, r, 5); | 195 | err = platform_device_add_resources(pdev, r, 5); |
194 | if (err) | 196 | if (err) |
@@ -302,6 +304,7 @@ static int __init mv64x60_eth_device_setup(struct device_node *np, int id, | |||
302 | if (!pdev) | 304 | if (!pdev) |
303 | return -ENOMEM; | 305 | return -ENOMEM; |
304 | 306 | ||
307 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
305 | err = platform_device_add_resources(pdev, r, 1); | 308 | err = platform_device_add_resources(pdev, r, 1); |
306 | if (err) | 309 | if (err) |
307 | goto error; | 310 | goto error; |
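
[Editor's note] Platform devices allocated with platform_device_alloc() start with a zero coherent_dma_mask, so the two mv64x60_dev.c hunks set it to DMA_BIT_MASK(32) before registration, presumably so the attached MPSC and ethernet drivers can make coherent DMA allocations. The pattern, with a placeholder device name and no resources:

    #include <linux/platform_device.h>
    #include <linux/dma-mapping.h>

    /* Sketch only: "demo-ctlr" is a placeholder device name. */
    static int demo_device_setup(int id)
    {
            struct platform_device *pdev;
            int err;

            pdev = platform_device_alloc("demo-ctlr", id);
            if (!pdev)
                    return -ENOMEM;

            /* allow 32-bit coherent DMA allocations for this device */
            pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

            err = platform_device_add(pdev);
            if (err)
                    platform_device_put(pdev);
            return err;
    }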
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 5f0b0368a209..d927da893ec4 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -237,7 +237,7 @@ static void qe_ic_mask_irq(unsigned int virq) | |||
237 | } | 237 | } |
238 | 238 | ||
239 | static struct irq_chip qe_ic_irq_chip = { | 239 | static struct irq_chip qe_ic_irq_chip = { |
240 | .name = " QEIC ", | 240 | .name = "QEIC", |
241 | .unmask = qe_ic_unmask_irq, | 241 | .unmask = qe_ic_unmask_irq, |
242 | .mask = qe_ic_mask_irq, | 242 | .mask = qe_ic_mask_irq, |
243 | .mask_ack = qe_ic_mask_irq, | 243 | .mask_ack = qe_ic_mask_irq, |
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 6f220a913e42..0038fb78f094 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -177,7 +177,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
177 | } | 177 | } |
178 | 178 | ||
179 | static struct irq_chip uic_irq_chip = { | 179 | static struct irq_chip uic_irq_chip = { |
180 | .name = " UIC ", | 180 | .name = "UIC", |
181 | .unmask = uic_unmask_irq, | 181 | .unmask = uic_unmask_irq, |
182 | .mask = uic_mask_irq, | 182 | .mask = uic_mask_irq, |
183 | .mask_ack = uic_mask_ack_irq, | 183 | .mask_ack = uic_mask_ack_irq, |