author	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-18 04:16:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-18 04:16:10 -0500
commit	148b00b63a3f747141207e7a409f14bd26d0820f (patch)
tree	e88b52035d193517536087692583a5ec194d5ee0 /arch
parent	e36d5058dbf85aacf2fadf508f275afd37c58576 (diff)
parent	a313f4c55d4952f2105fe33a4957ed858e998359 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/signal32: Fix sigset_t conversion when copying to user
  powerpc: Fix atomic_xxx_return barrier semantics
  powerpc: Remove buggy 9-year-old test for binutils < 2.12.1
  powerpc/book3e-64: Fix debug support for userspace
  powerpc: Remove extraneous CONFIG_PPC_ADV_DEBUG_REGS define
  powerpc: Revert show_regs() define for readability
  powerpc/ps3: Fix SMP lockdep boot warning
  powerpc/ps3: Fix lost SMP IPIs
  powerpc: Add hvcall.h include to book3s_hv.c
  powerpc/trace: Add a dummy stack frame for trace_hardirqs_off
  powerpc: Copy down exception vectors after feature fixups
  powerpc: panic if we can't instantiate RTAS
  powerpc/4xx: Fix typos in kexec config dependencies
  powerpc/fsl: MCU_MPC8349EMITX wants I2C built-in, modular won't do...
  powerpc/fsl_udc_core: Fix dumb typo
  carma-fpga: Missed switch from of_register_platform_driver()
  powerpc: Fix build breakage in jump_label.c
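
Note on the barrier fix (editor's illustration, not part of the commit): the
atomic_xxx_return change below replaces the acquire/release barrier pair around
powerpc's lwarx/stwcx. loops with PPC_ATOMIC_ENTRY_BARRIER (lwsync) before and
PPC_ATOMIC_EXIT_BARRIER (sync) after, so value-returning atomics act as full
memory barriers. A minimal sketch of the resulting shape, assuming a powerpc
compiler; the helper name my_atomic_add_return is hypothetical and the
PPC405_ERR77 workaround from the real code is omitted:

	/* Roughly what the patched atomic_add_return expands to. */
	static inline int my_atomic_add_return(int a, int *p)
	{
		int t;

		__asm__ __volatile__(
		"	lwsync\n"		/* PPC_ATOMIC_ENTRY_BARRIER */
		"1:	lwarx	%0,0,%2\n"	/* load-reserve the counter */
		"	add	%0,%1,%0\n"
		"	stwcx.	%0,0,%2\n"	/* store-conditional, retry if lost */
		"	bne-	1b\n"
		"	sync\n"			/* PPC_ATOMIC_EXIT_BARRIER */
		: "=&r" (t)
		: "r" (a), "r" (p)
		: "cc", "memory");

		return t;
	}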
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig	2
-rw-r--r--	arch/powerpc/Makefile	6
-rw-r--r--	arch/powerpc/include/asm/atomic.h	48
-rw-r--r--	arch/powerpc/include/asm/bitops.h	12
-rw-r--r--	arch/powerpc/include/asm/futex.h	7
-rw-r--r--	arch/powerpc/include/asm/reg_booke.h	2
-rw-r--r--	arch/powerpc/include/asm/sections.h	2
-rw-r--r--	arch/powerpc/include/asm/synch.h	9
-rw-r--r--	arch/powerpc/kernel/entry_32.S	15
-rw-r--r--	arch/powerpc/kernel/jump_label.c	2
-rw-r--r--	arch/powerpc/kernel/kvm.c	1
-rw-r--r--	arch/powerpc/kernel/misc_32.S	2
-rw-r--r--	arch/powerpc/kernel/process.c	24
-rw-r--r--	arch/powerpc/kernel/prom_init.c	6
-rw-r--r--	arch/powerpc/kernel/setup_32.c	2
-rw-r--r--	arch/powerpc/kernel/setup_64.c	1
-rw-r--r--	arch/powerpc/kernel/signal_32.c	2
-rw-r--r--	arch/powerpc/kernel/traps.c	2
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	1
-rw-r--r--	arch/powerpc/lib/feature-fixups.c	23
-rw-r--r--	arch/powerpc/platforms/Kconfig	2
-rw-r--r--	arch/powerpc/platforms/ps3/interrupt.c	23
-rw-r--r--	arch/powerpc/platforms/ps3/platform.h	1
-rw-r--r--	arch/powerpc/platforms/ps3/smp.c	62
24 files changed, 149 insertions(+), 108 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b177caa56d95..951e18f5335b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
 	bool "kexec system call (EXPERIMENTAL)"
-	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL
+	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 57af16edc192..70ba0c0a1223 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -255,12 +255,6 @@ checkbin:
 		echo 'disable kernel modules' ; \
 		false ; \
 	fi
-	@if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
-		echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
-		echo 'correctly with old versions of binutils.' ; \
-		echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
-		false ; \
-	fi
 
 CLEAN_FILES += $(TOUT)
 
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad377..02e41b53488d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	int t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	cmpwi	%0,1\n\
 	addi	%0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&b" (t)
 	: "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	long t;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
 "	stdcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10fa..efdc92618b38 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \
 	return (old & mask);	\
 }
 
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
 	      PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
 static __inline__ int test_and_set_bit(unsigned long nr,
 					volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2ef..2a9cf845473b 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
-	PPC_RELEASE_BARRIER \
+	PPC_ATOMIC_ENTRY_BARRIER \
 "1:	lwarx	%0,0,%2\n" \
 	insn \
 	PPC405_ERR77(0, %2) \
 "2:	stwcx.	%1,0,%2\n" \
 	"bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
 	"li	%1,0\n" \
 "3:	.section .fixup,\"ax\"\n" \
 "4:	li	%1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
 	cmpw	0,%1,%4\n\
 	bne-	3f\n"
 	PPC405_ERR77(0,%3)
 "2:	stwcx.	%5,0,%3\n\
 	bne-	1b\n"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 "3:	.section .fixup,\"ax\"\n\
 4:	li	%0,%6\n\
 	b	3b\n\
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399c..03c48e819c8e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
 
 #define MSR_		MSR_ME | MSR_CE
 #define MSR_KERNEL	MSR_ | MSR_64BIT
-#define MSR_USER32	MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32	MSR_ | MSR_PR | MSR_EE
 #define MSR_USER64	MSR_USER32 | MSR_64BIT
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c710..a0f358d4a00c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c5..e682a7143edb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
 			     void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
@@ -41,11 +42,15 @@ static inline void isync(void)
 	START_LWSYNC_SECTION(97);			\
 	isync;						\
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
 #define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc0ab08..4f80cf1ce77b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -215,7 +215,22 @@ reenable_mmu: /* re-enable mmu so we can */
 	stw	r9,8(r1)
 	stw	r11,12(r1)
 	stw	r3,ORIG_GPR3(r1)
+	/*
+	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
+	 * If from user mode there is only one stack frame on the stack, and
+	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
+	 * stack frame to make trace_hardirqs_off happy.
+	 */
+	andi.	r12,r12,MSR_PR
+	beq	11f
+	stwu	r1,-16(r1)
+	bl	trace_hardirqs_off
+	addi	r1,r1,16
+	b	12f
+
+11:
 	bl	trace_hardirqs_off
+12:
 	lwz	r0,GPR0(r1)
 	lwz	r3,ORIG_GPR3(r1)
 	lwz	r4,GPR4(r1)
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 368d158d665d..a1ed8a8c7cb4 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,6 +11,7 @@
 #include <linux/jump_label.h>
 #include <asm/code-patching.h>
 
+#ifdef HAVE_JUMP_LABEL
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	else
 		patch_instruction(addr, PPC_INST_NOP);
 }
+#endif
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 35f27646c4ff..2985338d0e10 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
-	extern u32 __end_interrupts;
 	if ((ulong)inst < (ulong)&__end_interrupts)
 		return;
 #endif
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index f7d760ab5ca1..7cd07b42ca1a 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,7 +738,7 @@ relocate_new_kernel:
 	mr	r5, r31
 
 	li	r0, 0
-#elif defined(CONFIG_44x) && !defined(CONFIG_47x)
+#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
 
 /*
  * Code for setting up 1:1 mapping for PPC440x for KEXEC
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9054ca9ab4f9..6457574c0b2f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	new_thread = &new->thread;
 	old_thread = &current->thread;
 
-#if defined(CONFIG_PPC_BOOK3E_64)
-	/* XXX Current Book3E code doesn't deal with kernel side DBCR0,
-	 * we always hold the user values, so we set it now.
-	 *
-	 * However, we ensure the kernel MSR:DE is appropriately cleared too
-	 * to avoid spurrious single step exceptions in the kernel.
-	 *
-	 * This will have to change to merge with the ppc32 code at some point,
-	 * but I don't like much what ppc32 is doing today so there's some
-	 * thinking needed there
-	 */
-	if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
-		u32 dbcr0;
-
-		mtmsr(mfmsr() & ~MSR_DE);
-		isync();
-		dbcr0 = mfspr(SPRN_DBCR0);
-		dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
-		mtspr(SPRN_DBCR0, dbcr0);
-	}
-#endif /* CONFIG_PPC64_BOOK3E */
-
 #ifdef CONFIG_PPC64
 	/*
 	 * Collect processor utilization data per process
@@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs)
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
 		printk("CFAR: "REG"\n", regs->orig_gpr3);
 	if (trap == 0x300 || trap == 0x600)
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
 		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b4fa66127495..cc584865b3df 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
 		return;
 
 	base = alloc_down(size, PAGE_SIZE, 0);
-	if (base == 0) {
-		prom_printf("RTAS allocation failed !\n");
-		return;
-	}
+	if (base == 0)
+		prom_panic("Could not allocate memory for RTAS\n");
 
 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
 	if (!IHANDLE_VALID(rtas_inst)) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1ce86357ecb..ac7610815113 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
 			PTRRELOC(&__start___lwsync_fixup),
 			PTRRELOC(&__stop___lwsync_fixup));
 
+	do_final_fixups();
+
 	return KERNELBASE + offset;
 }
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1a9dea80a69b..fb9bb46e7e88 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -359,6 +359,7 @@ void __init setup_system(void)
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 	do_lwsync_fixups(cur_cpu_spec->cpu_features,
 			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
+	do_final_fixups();
 
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 78b76dc54dfb..836a5a19eb2c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
 	compat_sigset_t	cset;
 
 	switch (_NSIG_WORDS) {
-	case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
+	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
 		cset.sig[7] = set->sig[3] >> 32;
 	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
 		cset.sig[5] = set->sig[2] >> 32;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4e5908264d1a..5459d148a0f6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 
 	if (user_mode(regs)) {
 		current->thread.dbcr0 &= ~DBCR0_IC;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
 				       current->thread.dbcr1))
 			regs->msr |= MSR_DE;
 		else
 			/* Make sure the IDM bit is off */
 			current->thread.dbcr0 &= ~DBCR0_IDM;
-#endif
 	}
 
 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cdbc07cec14..0cb137a9b038 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -44,6 +44,7 @@
 #include <asm/processor.h>
 #include <asm/cputhreads.h>
 #include <asm/page.h>
+#include <asm/hvcall.h>
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d0171392..7a8a7487cee8 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
 
 
 struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }
 
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+	int *src, *dest;
+	unsigned long length;
+
+	if (PHYSICAL_START == 0)
+		return;
+
+	src = (int *)(KERNELBASE + PHYSICAL_START);
+	dest = (int *)KERNELBASE;
+	length = (__end_interrupts - _stext) / sizeof(int);
+
+	while (length--) {
+		patch_instruction(dest, *src);
+		src++;
+		dest++;
+	}
+#endif
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)	\
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e4588721ef34..3fe6d927ad70 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -347,7 +347,7 @@ config SIMPLE_GPIO
 
 config MCU_MPC8349EMITX
 	bool "MPC8349E-mITX MCU driver"
-	depends on I2C && PPC_83xx
+	depends on I2C=y && PPC_83xx
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 404bc52b7806..1d6f4f478fe2 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -88,6 +88,7 @@ struct ps3_private {
 	struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
 	u64 ppe_id;
 	u64 thread_id;
+	unsigned long ipi_mask;
 };
 
 static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
 static void ps3_chip_eoi(struct irq_data *d)
 {
 	const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
-	lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
+
+	/* non-IPIs are EOIed here. */
+
+	if (!test_bit(63 - d->irq, &pd->ipi_mask))
+		lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
 }
 
 /**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
 		cpu, virq, pd->bmp.ipi_debug_brk_mask);
 }
 
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
+{
+	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
+
+	set_bit(63 - virq, &pd->ipi_mask);
+
+	DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
+		cpu, virq, pd->ipi_mask);
+}
+
 static unsigned int ps3_get_irq(void)
 {
 	struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
 		BUG();
 	}
 #endif
+
+	/* IPIs are EOIed here. */
+
+	if (test_bit(63 - plug, &pd->ipi_mask))
+		lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
+
 	return plug;
 }
 
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 9a196a88eda7..1a633ed0fe98 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
 void ps3_init_IRQ(void);
 void ps3_shutdown_IRQ(int cpu);
 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
 
 /* smp */
 
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 4c44794faac0..efc1cd8c034a 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -59,46 +59,49 @@ static void ps3_smp_message_pass(int cpu, int msg)
 
 static int ps3_smp_probe(void)
 {
-	return 2;
-}
+	int cpu;
 
-static void __init ps3_smp_setup_cpu(int cpu)
-{
-	int result;
-	unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
-	int i;
+	for (cpu = 0; cpu < 2; cpu++) {
+		int result;
+		unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
+		int i;
 
 	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
 
 	/*
 	 * Check assumptions on ps3_ipi_virqs[] indexing. If this
 	 * check fails, then a different mapping of PPC_MSG_
 	 * to index needs to be setup.
 	 */
 
 	BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
 	BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
 	BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
 	BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
 
 	for (i = 0; i < MSG_COUNT; i++) {
 		result = ps3_event_receive_port_setup(cpu, &virqs[i]);
 
 		if (result)
 			continue;
 
 		DBG("%s:%d: (%d, %d) => virq %u\n",
 			__func__, __LINE__, cpu, i, virqs[i]);
 
 		result = smp_request_message_ipi(virqs[i], i);
 
 		if (result)
 			virqs[i] = NO_IRQ;
-	}
+		else
+			ps3_register_ipi_irq(cpu, virqs[i]);
+	}
 
 	ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
 
 	DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+	}
+
+	return 2;
 }
 
 void ps3_smp_cleanup_cpu(int cpu)
@@ -121,7 +124,6 @@ static struct smp_ops_t ps3_smp_ops = {
 	.probe		= ps3_smp_probe,
 	.message_pass	= ps3_smp_message_pass,
 	.kick_cpu	= smp_generic_kick_cpu,
-	.setup_cpu	= ps3_smp_setup_cpu,
 };
 
 void smp_init_ps3(void)