| author | Steve French <sfrench@us.ibm.com> | 2011-12-16 01:39:20 -0500 |
|---|---|---|
| committer | Steve French <sfrench@us.ibm.com> | 2011-12-16 01:39:20 -0500 |
| commit | aaf015890754d58dcb71a4aa44ed246bb082bcf6 (patch) | |
| tree | 17b51ff707fd1b3efec3a3ab872f0d7a7416aca5 | /arch/powerpc/include |
| parent | 9c32c63bb70b2fafc3b18bee29959c3bf245ceba (diff) | |
| parent | 8def5f51b012efb00e77ba2d04696cc0aadd0609 (diff) | |
Merge branch 'master' of git+ssh://git.samba.org/data/git/sfrench/cifs-2.6
Diffstat (limited to 'arch/powerpc/include')
| -rw-r--r-- | arch/powerpc/include/asm/atomic.h | 48 |
| -rw-r--r-- | arch/powerpc/include/asm/bitops.h | 12 |
| -rw-r--r-- | arch/powerpc/include/asm/floppy.h | 4 |
| -rw-r--r-- | arch/powerpc/include/asm/futex.h | 7 |
| -rw-r--r-- | arch/powerpc/include/asm/kvm.h | 8 |
| -rw-r--r-- | arch/powerpc/include/asm/kvm_book3s.h | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/lv1call.h | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/reg_booke.h | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/sections.h | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/synch.h | 9 |
| -rw-r--r-- | arch/powerpc/include/asm/xics.h | 4 |
11 files changed, 48 insertions, 52 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad377..02e41b53488d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
| @@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v) | |||
| 49 | int t; | 49 | int t; |
| 50 | 50 | ||
| 51 | __asm__ __volatile__( | 51 | __asm__ __volatile__( |
| 52 | PPC_RELEASE_BARRIER | 52 | PPC_ATOMIC_ENTRY_BARRIER |
| 53 | "1: lwarx %0,0,%2 # atomic_add_return\n\ | 53 | "1: lwarx %0,0,%2 # atomic_add_return\n\ |
| 54 | add %0,%1,%0\n" | 54 | add %0,%1,%0\n" |
| 55 | PPC405_ERR77(0,%2) | 55 | PPC405_ERR77(0,%2) |
| 56 | " stwcx. %0,0,%2 \n\ | 56 | " stwcx. %0,0,%2 \n\ |
| 57 | bne- 1b" | 57 | bne- 1b" |
| 58 | PPC_ACQUIRE_BARRIER | 58 | PPC_ATOMIC_EXIT_BARRIER |
| 59 | : "=&r" (t) | 59 | : "=&r" (t) |
| 60 | : "r" (a), "r" (&v->counter) | 60 | : "r" (a), "r" (&v->counter) |
| 61 | : "cc", "memory"); | 61 | : "cc", "memory"); |
| @@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v) | |||
| 85 | int t; | 85 | int t; |
| 86 | 86 | ||
| 87 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
| 88 | PPC_RELEASE_BARRIER | 88 | PPC_ATOMIC_ENTRY_BARRIER |
| 89 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ | 89 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ |
| 90 | subf %0,%1,%0\n" | 90 | subf %0,%1,%0\n" |
| 91 | PPC405_ERR77(0,%2) | 91 | PPC405_ERR77(0,%2) |
| 92 | " stwcx. %0,0,%2 \n\ | 92 | " stwcx. %0,0,%2 \n\ |
| 93 | bne- 1b" | 93 | bne- 1b" |
| 94 | PPC_ACQUIRE_BARRIER | 94 | PPC_ATOMIC_EXIT_BARRIER |
| 95 | : "=&r" (t) | 95 | : "=&r" (t) |
| 96 | : "r" (a), "r" (&v->counter) | 96 | : "r" (a), "r" (&v->counter) |
| 97 | : "cc", "memory"); | 97 | : "cc", "memory"); |
| @@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v) | |||
| 119 | int t; | 119 | int t; |
| 120 | 120 | ||
| 121 | __asm__ __volatile__( | 121 | __asm__ __volatile__( |
| 122 | PPC_RELEASE_BARRIER | 122 | PPC_ATOMIC_ENTRY_BARRIER |
| 123 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ | 123 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ |
| 124 | addic %0,%0,1\n" | 124 | addic %0,%0,1\n" |
| 125 | PPC405_ERR77(0,%1) | 125 | PPC405_ERR77(0,%1) |
| 126 | " stwcx. %0,0,%1 \n\ | 126 | " stwcx. %0,0,%1 \n\ |
| 127 | bne- 1b" | 127 | bne- 1b" |
| 128 | PPC_ACQUIRE_BARRIER | 128 | PPC_ATOMIC_EXIT_BARRIER |
| 129 | : "=&r" (t) | 129 | : "=&r" (t) |
| 130 | : "r" (&v->counter) | 130 | : "r" (&v->counter) |
| 131 | : "cc", "xer", "memory"); | 131 | : "cc", "xer", "memory"); |
| @@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
| 163 | int t; | 163 | int t; |
| 164 | 164 | ||
| 165 | __asm__ __volatile__( | 165 | __asm__ __volatile__( |
| 166 | PPC_RELEASE_BARRIER | 166 | PPC_ATOMIC_ENTRY_BARRIER |
| 167 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ | 167 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ |
| 168 | addic %0,%0,-1\n" | 168 | addic %0,%0,-1\n" |
| 169 | PPC405_ERR77(0,%1) | 169 | PPC405_ERR77(0,%1) |
| 170 | " stwcx. %0,0,%1\n\ | 170 | " stwcx. %0,0,%1\n\ |
| 171 | bne- 1b" | 171 | bne- 1b" |
| 172 | PPC_ACQUIRE_BARRIER | 172 | PPC_ATOMIC_EXIT_BARRIER |
| 173 | : "=&r" (t) | 173 | : "=&r" (t) |
| 174 | : "r" (&v->counter) | 174 | : "r" (&v->counter) |
| 175 | : "cc", "xer", "memory"); | 175 | : "cc", "xer", "memory"); |
| @@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |||
| 194 | int t; | 194 | int t; |
| 195 | 195 | ||
| 196 | __asm__ __volatile__ ( | 196 | __asm__ __volatile__ ( |
| 197 | PPC_RELEASE_BARRIER | 197 | PPC_ATOMIC_ENTRY_BARRIER |
| 198 | "1: lwarx %0,0,%1 # __atomic_add_unless\n\ | 198 | "1: lwarx %0,0,%1 # __atomic_add_unless\n\ |
| 199 | cmpw 0,%0,%3 \n\ | 199 | cmpw 0,%0,%3 \n\ |
| 200 | beq- 2f \n\ | 200 | beq- 2f \n\ |
| @@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |||
| 202 | PPC405_ERR77(0,%2) | 202 | PPC405_ERR77(0,%2) |
| 203 | " stwcx. %0,0,%1 \n\ | 203 | " stwcx. %0,0,%1 \n\ |
| 204 | bne- 1b \n" | 204 | bne- 1b \n" |
| 205 | PPC_ACQUIRE_BARRIER | 205 | PPC_ATOMIC_EXIT_BARRIER |
| 206 | " subf %0,%2,%0 \n\ | 206 | " subf %0,%2,%0 \n\ |
| 207 | 2:" | 207 | 2:" |
| 208 | : "=&r" (t) | 208 | : "=&r" (t) |
| @@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) | |||
| 226 | int t; | 226 | int t; |
| 227 | 227 | ||
| 228 | __asm__ __volatile__( | 228 | __asm__ __volatile__( |
| 229 | PPC_RELEASE_BARRIER | 229 | PPC_ATOMIC_ENTRY_BARRIER |
| 230 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ | 230 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ |
| 231 | cmpwi %0,1\n\ | 231 | cmpwi %0,1\n\ |
| 232 | addi %0,%0,-1\n\ | 232 | addi %0,%0,-1\n\ |
| @@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) | |||
| 234 | PPC405_ERR77(0,%1) | 234 | PPC405_ERR77(0,%1) |
| 235 | " stwcx. %0,0,%1\n\ | 235 | " stwcx. %0,0,%1\n\ |
| 236 | bne- 1b" | 236 | bne- 1b" |
| 237 | PPC_ACQUIRE_BARRIER | 237 | PPC_ATOMIC_EXIT_BARRIER |
| 238 | "\n\ | 238 | "\n\ |
| 239 | 2:" : "=&b" (t) | 239 | 2:" : "=&b" (t) |
| 240 | : "r" (&v->counter) | 240 | : "r" (&v->counter) |
| @@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v) | |||
| 285 | long t; | 285 | long t; |
| 286 | 286 | ||
| 287 | __asm__ __volatile__( | 287 | __asm__ __volatile__( |
| 288 | PPC_RELEASE_BARRIER | 288 | PPC_ATOMIC_ENTRY_BARRIER |
| 289 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ | 289 | "1: ldarx %0,0,%2 # atomic64_add_return\n\ |
| 290 | add %0,%1,%0\n\ | 290 | add %0,%1,%0\n\ |
| 291 | stdcx. %0,0,%2 \n\ | 291 | stdcx. %0,0,%2 \n\ |
| 292 | bne- 1b" | 292 | bne- 1b" |
| 293 | PPC_ACQUIRE_BARRIER | 293 | PPC_ATOMIC_EXIT_BARRIER |
| 294 | : "=&r" (t) | 294 | : "=&r" (t) |
| 295 | : "r" (a), "r" (&v->counter) | 295 | : "r" (a), "r" (&v->counter) |
| 296 | : "cc", "memory"); | 296 | : "cc", "memory"); |
| @@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v) | |||
| 319 | long t; | 319 | long t; |
| 320 | 320 | ||
| 321 | __asm__ __volatile__( | 321 | __asm__ __volatile__( |
| 322 | PPC_RELEASE_BARRIER | 322 | PPC_ATOMIC_ENTRY_BARRIER |
| 323 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ | 323 | "1: ldarx %0,0,%2 # atomic64_sub_return\n\ |
| 324 | subf %0,%1,%0\n\ | 324 | subf %0,%1,%0\n\ |
| 325 | stdcx. %0,0,%2 \n\ | 325 | stdcx. %0,0,%2 \n\ |
| 326 | bne- 1b" | 326 | bne- 1b" |
| 327 | PPC_ACQUIRE_BARRIER | 327 | PPC_ATOMIC_EXIT_BARRIER |
| 328 | : "=&r" (t) | 328 | : "=&r" (t) |
| 329 | : "r" (a), "r" (&v->counter) | 329 | : "r" (a), "r" (&v->counter) |
| 330 | : "cc", "memory"); | 330 | : "cc", "memory"); |
| @@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) | |||
| 351 | long t; | 351 | long t; |
| 352 | 352 | ||
| 353 | __asm__ __volatile__( | 353 | __asm__ __volatile__( |
| 354 | PPC_RELEASE_BARRIER | 354 | PPC_ATOMIC_ENTRY_BARRIER |
| 355 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ | 355 | "1: ldarx %0,0,%1 # atomic64_inc_return\n\ |
| 356 | addic %0,%0,1\n\ | 356 | addic %0,%0,1\n\ |
| 357 | stdcx. %0,0,%1 \n\ | 357 | stdcx. %0,0,%1 \n\ |
| 358 | bne- 1b" | 358 | bne- 1b" |
| 359 | PPC_ACQUIRE_BARRIER | 359 | PPC_ATOMIC_EXIT_BARRIER |
| 360 | : "=&r" (t) | 360 | : "=&r" (t) |
| 361 | : "r" (&v->counter) | 361 | : "r" (&v->counter) |
| 362 | : "cc", "xer", "memory"); | 362 | : "cc", "xer", "memory"); |
| @@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) | |||
| 393 | long t; | 393 | long t; |
| 394 | 394 | ||
| 395 | __asm__ __volatile__( | 395 | __asm__ __volatile__( |
| 396 | PPC_RELEASE_BARRIER | 396 | PPC_ATOMIC_ENTRY_BARRIER |
| 397 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ | 397 | "1: ldarx %0,0,%1 # atomic64_dec_return\n\ |
| 398 | addic %0,%0,-1\n\ | 398 | addic %0,%0,-1\n\ |
| 399 | stdcx. %0,0,%1\n\ | 399 | stdcx. %0,0,%1\n\ |
| 400 | bne- 1b" | 400 | bne- 1b" |
| 401 | PPC_ACQUIRE_BARRIER | 401 | PPC_ATOMIC_EXIT_BARRIER |
| 402 | : "=&r" (t) | 402 | : "=&r" (t) |
| 403 | : "r" (&v->counter) | 403 | : "r" (&v->counter) |
| 404 | : "cc", "xer", "memory"); | 404 | : "cc", "xer", "memory"); |
| @@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) | |||
| 418 | long t; | 418 | long t; |
| 419 | 419 | ||
| 420 | __asm__ __volatile__( | 420 | __asm__ __volatile__( |
| 421 | PPC_RELEASE_BARRIER | 421 | PPC_ATOMIC_ENTRY_BARRIER |
| 422 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ | 422 | "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ |
| 423 | addic. %0,%0,-1\n\ | 423 | addic. %0,%0,-1\n\ |
| 424 | blt- 2f\n\ | 424 | blt- 2f\n\ |
| 425 | stdcx. %0,0,%1\n\ | 425 | stdcx. %0,0,%1\n\ |
| 426 | bne- 1b" | 426 | bne- 1b" |
| 427 | PPC_ACQUIRE_BARRIER | 427 | PPC_ATOMIC_EXIT_BARRIER |
| 428 | "\n\ | 428 | "\n\ |
| 429 | 2:" : "=&r" (t) | 429 | 2:" : "=&r" (t) |
| 430 | : "r" (&v->counter) | 430 | : "r" (&v->counter) |
| @@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
| 450 | long t; | 450 | long t; |
| 451 | 451 | ||
| 452 | __asm__ __volatile__ ( | 452 | __asm__ __volatile__ ( |
| 453 | PPC_RELEASE_BARRIER | 453 | PPC_ATOMIC_ENTRY_BARRIER |
| 454 | "1: ldarx %0,0,%1 # __atomic_add_unless\n\ | 454 | "1: ldarx %0,0,%1 # __atomic_add_unless\n\ |
| 455 | cmpd 0,%0,%3 \n\ | 455 | cmpd 0,%0,%3 \n\ |
| 456 | beq- 2f \n\ | 456 | beq- 2f \n\ |
| 457 | add %0,%2,%0 \n" | 457 | add %0,%2,%0 \n" |
| 458 | " stdcx. %0,0,%1 \n\ | 458 | " stdcx. %0,0,%1 \n\ |
| 459 | bne- 1b \n" | 459 | bne- 1b \n" |
| 460 | PPC_ACQUIRE_BARRIER | 460 | PPC_ATOMIC_EXIT_BARRIER |
| 461 | " subf %0,%2,%0 \n\ | 461 | " subf %0,%2,%0 \n\ |
| 462 | 2:" | 462 | 2:" |
| 463 | : "=&r" (t) | 463 | : "=&r" (t) |
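
Every atomic.h hunk above makes the same substitution: the lwsync-based PPC_RELEASE_BARRIER ahead of the lwarx/ldarx reservation loop becomes PPC_ATOMIC_ENTRY_BARRIER, and the isync-based PPC_ACQUIRE_BARRIER after the stwcx./stdcx. becomes PPC_ATOMIC_EXIT_BARRIER. As the synch.h hunk further down defines them, the entry barrier is still an lwsync while the exit barrier is now a full sync, so these value-returning operations end with a full barrier rather than an acquire sequence. A minimal sketch of the shared pattern, using atomic_add_return() as the example (simplified from the code above; the PPC405_ERR77 workaround and the 64-bit variants are omitted, so treat this as illustrative rather than a drop-in replacement):

```c
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER	/* lwsync on SMP, empty on UP */
"1:	lwarx	%0,0,%2\n"		/* load-reserve v->counter */
"	add	%0,%1,%0\n"
"	stwcx.	%0,0,%2\n"		/* store-conditional */
"	bne-	1b"			/* retry if the reservation was lost */
	PPC_ATOMIC_EXIT_BARRIER		/* full sync on SMP (previously an isync-based acquire) */
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
```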
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10fa..efdc92618b38 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
| @@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \ | |||
| 124 | return (old & mask); \ | 124 | return (old & mask); \ |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, | 127 | DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER, |
| 128 | PPC_ACQUIRE_BARRIER, 0) | 128 | PPC_ATOMIC_EXIT_BARRIER, 0) |
| 129 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", | 129 | DEFINE_TESTOP(test_and_set_bits_lock, or, "", |
| 130 | PPC_ACQUIRE_BARRIER, 1) | 130 | PPC_ACQUIRE_BARRIER, 1) |
| 131 | DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, | 131 | DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER, |
| 132 | PPC_ACQUIRE_BARRIER, 0) | 132 | PPC_ATOMIC_EXIT_BARRIER, 0) |
| 133 | DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, | 133 | DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER, |
| 134 | PPC_ACQUIRE_BARRIER, 0) | 134 | PPC_ATOMIC_EXIT_BARRIER, 0) |
| 135 | 135 | ||
| 136 | static __inline__ int test_and_set_bit(unsigned long nr, | 136 | static __inline__ int test_and_set_bit(unsigned long nr, |
| 137 | volatile unsigned long *addr) | 137 | volatile unsigned long *addr) |
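
The bitops.h hunk applies the same renaming to the three full-barrier test-and-modify generators, while test_and_set_bits_lock() keeps the plain PPC_ACQUIRE_BARRIER since it only needs acquire semantics. For context, DEFINE_TESTOP expands into a reservation loop of the same shape as the atomics above; the following is a hedged, simplified sketch — the real macro in bitops.h also selects the 64-bit load/store-conditional mnemonics and carries the PPC405 erratum workaround, so this is illustrative only:

```c
/* Simplified sketch of the DEFINE_TESTOP() generator: "prefix" and
 * "postfix" are the entry/exit barriers named in the hunk above, "op"
 * is the bitwise instruction (or/andc/xor), and "eh" (a lwarx hint in
 * the real macro) is ignored here. Illustrative only.
 */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh)			\
static __inline__ unsigned long fn(unsigned long mask,			\
				   volatile unsigned long *_p)		\
{									\
	unsigned long old, t;						\
	unsigned long *p = (unsigned long *)_p;				\
	__asm__ __volatile__ (						\
	prefix								\
"1:	lwarx	%0,0,%3\n"		/* load-reserve the word */	\
	#op "	%1,%0,%2\n"		/* apply the bit operation */	\
"	stwcx.	%1,0,%3\n"		/* store-conditional */		\
"	bne-	1b\n"							\
	postfix								\
	: "=&r" (old), "=&r" (t)					\
	: "r" (mask), "r" (p)						\
	: "cc", "memory");						\
	return (old & mask);		/* nonzero if any bit was set */\
}
```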
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c57e9d..936a904ae78c 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
| @@ -108,10 +108,10 @@ static int fd_request_irq(void) | |||
| 108 | { | 108 | { |
| 109 | if (can_use_virtual_dma) | 109 | if (can_use_virtual_dma) |
| 110 | return request_irq(FLOPPY_IRQ, floppy_hardint, | 110 | return request_irq(FLOPPY_IRQ, floppy_hardint, |
| 111 | IRQF_DISABLED, "floppy", NULL); | 111 | 0, "floppy", NULL); |
| 112 | else | 112 | else |
| 113 | return request_irq(FLOPPY_IRQ, floppy_interrupt, | 113 | return request_irq(FLOPPY_IRQ, floppy_interrupt, |
| 114 | IRQF_DISABLED, "floppy", NULL); | 114 | 0, "floppy", NULL); |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) | 117 | static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) |
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2ef..2a9cf845473b 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
| @@ -11,12 +11,13 @@ | |||
| 11 | 11 | ||
| 12 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | 12 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ |
| 13 | __asm__ __volatile ( \ | 13 | __asm__ __volatile ( \ |
| 14 | PPC_RELEASE_BARRIER \ | 14 | PPC_ATOMIC_ENTRY_BARRIER \ |
| 15 | "1: lwarx %0,0,%2\n" \ | 15 | "1: lwarx %0,0,%2\n" \ |
| 16 | insn \ | 16 | insn \ |
| 17 | PPC405_ERR77(0, %2) \ | 17 | PPC405_ERR77(0, %2) \ |
| 18 | "2: stwcx. %1,0,%2\n" \ | 18 | "2: stwcx. %1,0,%2\n" \ |
| 19 | "bne- 1b\n" \ | 19 | "bne- 1b\n" \ |
| 20 | PPC_ATOMIC_EXIT_BARRIER \ | ||
| 20 | "li %1,0\n" \ | 21 | "li %1,0\n" \ |
| 21 | "3: .section .fixup,\"ax\"\n" \ | 22 | "3: .section .fixup,\"ax\"\n" \ |
| 22 | "4: li %1,%3\n" \ | 23 | "4: li %1,%3\n" \ |
| @@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
| 92 | return -EFAULT; | 93 | return -EFAULT; |
| 93 | 94 | ||
| 94 | __asm__ __volatile__ ( | 95 | __asm__ __volatile__ ( |
| 95 | PPC_RELEASE_BARRIER | 96 | PPC_ATOMIC_ENTRY_BARRIER |
| 96 | "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ | 97 | "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ |
| 97 | cmpw 0,%1,%4\n\ | 98 | cmpw 0,%1,%4\n\ |
| 98 | bne- 3f\n" | 99 | bne- 3f\n" |
| 99 | PPC405_ERR77(0,%3) | 100 | PPC405_ERR77(0,%3) |
| 100 | "2: stwcx. %5,0,%3\n\ | 101 | "2: stwcx. %5,0,%3\n\ |
| 101 | bne- 1b\n" | 102 | bne- 1b\n" |
| 102 | PPC_ACQUIRE_BARRIER | 103 | PPC_ATOMIC_EXIT_BARRIER |
| 103 | "3: .section .fixup,\"ax\"\n\ | 104 | "3: .section .fixup,\"ax\"\n\ |
| 104 | 4: li %0,%6\n\ | 105 | 4: li %0,%6\n\ |
| 105 | b 3b\n\ | 106 | b 3b\n\ |
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd10..0ad432bc81d6 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
| @@ -149,12 +149,6 @@ struct kvm_regs { | |||
| 149 | #define KVM_SREGS_E_UPDATE_DBSR (1 << 3) | 149 | #define KVM_SREGS_E_UPDATE_DBSR (1 << 3) |
| 150 | 150 | ||
| 151 | /* | 151 | /* |
| 152 | * Book3S special bits to indicate contents in the struct by maintaining | ||
| 153 | * backwards compatibility with older structs. If adding a new field, | ||
| 154 | * please make sure to add a flag for that new field */ | ||
| 155 | #define KVM_SREGS_S_HIOR (1 << 0) | ||
| 156 | |||
| 157 | /* | ||
| 158 | * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a | 152 | * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a |
| 159 | * previous KVM_GET_REGS. | 153 | * previous KVM_GET_REGS. |
| 160 | * | 154 | * |
| @@ -179,8 +173,6 @@ struct kvm_sregs { | |||
| 179 | __u64 ibat[8]; | 173 | __u64 ibat[8]; |
| 180 | __u64 dbat[8]; | 174 | __u64 dbat[8]; |
| 181 | } ppc32; | 175 | } ppc32; |
| 182 | __u64 flags; /* KVM_SREGS_S_ */ | ||
| 183 | __u64 hior; | ||
| 184 | } s; | 176 | } s; |
| 185 | struct { | 177 | struct { |
| 186 | union { | 178 | union { |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33de..d4df013ad779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
| @@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s { | |||
| 90 | #endif | 90 | #endif |
| 91 | int context_id[SID_CONTEXTS]; | 91 | int context_id[SID_CONTEXTS]; |
| 92 | 92 | ||
| 93 | bool hior_sregs; /* HIOR is set by SREGS, not PVR */ | ||
| 94 | |||
| 95 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; | 93 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; |
| 96 | struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; | 94 | struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; |
| 97 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; | 95 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; |
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 9cd5fc828a37..f77c708c67a0 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
| @@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free, 1, 0, 218 ) | |||
| 316 | LV1_CALL(gpu_context_iomap, 5, 0, 221 ) | 316 | LV1_CALL(gpu_context_iomap, 5, 0, 221 ) |
| 317 | LV1_CALL(gpu_context_attribute, 6, 0, 225 ) | 317 | LV1_CALL(gpu_context_attribute, 6, 0, 225 ) |
| 318 | LV1_CALL(gpu_context_intr, 1, 1, 227 ) | 318 | LV1_CALL(gpu_context_intr, 1, 1, 227 ) |
| 319 | LV1_CALL(gpu_attribute, 5, 0, 228 ) | 319 | LV1_CALL(gpu_attribute, 3, 0, 228 ) |
| 320 | LV1_CALL(get_rtc, 0, 2, 232 ) | 320 | LV1_CALL(get_rtc, 0, 2, 232 ) |
| 321 | LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 ) | 321 | LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 ) |
| 322 | LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 ) | 322 | LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 ) |
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399c..03c48e819c8e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
| @@ -31,7 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #define MSR_ MSR_ME | MSR_CE | 32 | #define MSR_ MSR_ME | MSR_CE |
| 33 | #define MSR_KERNEL MSR_ | MSR_64BIT | 33 | #define MSR_KERNEL MSR_ | MSR_64BIT |
| 34 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE | 34 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE |
| 35 | #define MSR_USER64 MSR_USER32 | MSR_64BIT | 35 | #define MSR_USER64 MSR_USER32 | MSR_64BIT |
| 36 | #elif defined (CONFIG_40x) | 36 | #elif defined (CONFIG_40x) |
| 37 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) | 37 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) |
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c710..a0f358d4a00c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
| @@ -8,7 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #ifdef __powerpc64__ | 9 | #ifdef __powerpc64__ |
| 10 | 10 | ||
| 11 | extern char _end[]; | 11 | extern char __end_interrupts[]; |
| 12 | 12 | ||
| 13 | static inline int in_kernel_text(unsigned long addr) | 13 | static inline int in_kernel_text(unsigned long addr) |
| 14 | { | 14 | { |
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c5..e682a7143edb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
| @@ -13,6 +13,7 @@ | |||
| 13 | extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; | 13 | extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; |
| 14 | extern void do_lwsync_fixups(unsigned long value, void *fixup_start, | 14 | extern void do_lwsync_fixups(unsigned long value, void *fixup_start, |
| 15 | void *fixup_end); | 15 | void *fixup_end); |
| 16 | extern void do_final_fixups(void); | ||
| 16 | 17 | ||
| 17 | static inline void eieio(void) | 18 | static inline void eieio(void) |
| 18 | { | 19 | { |
| @@ -41,11 +42,15 @@ static inline void isync(void) | |||
| 41 | START_LWSYNC_SECTION(97); \ | 42 | START_LWSYNC_SECTION(97); \ |
| 42 | isync; \ | 43 | isync; \ |
| 43 | MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); | 44 | MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); |
| 44 | #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) | 45 | #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) |
| 45 | #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" | 46 | #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" |
| 47 | #define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n" | ||
| 48 | #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n" | ||
| 46 | #else | 49 | #else |
| 47 | #define PPC_ACQUIRE_BARRIER | 50 | #define PPC_ACQUIRE_BARRIER |
| 48 | #define PPC_RELEASE_BARRIER | 51 | #define PPC_RELEASE_BARRIER |
| 52 | #define PPC_ATOMIC_ENTRY_BARRIER | ||
| 53 | #define PPC_ATOMIC_EXIT_BARRIER | ||
| 49 | #endif | 54 | #endif |
| 50 | 55 | ||
| 51 | #endif /* __KERNEL__ */ | 56 | #endif /* __KERNEL__ */ |
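
The synch.h hunk is where the two new macros are introduced: on SMP builds, PPC_ATOMIC_ENTRY_BARRIER expands to lwsync and PPC_ATOMIC_EXIT_BARRIER to a full sync, while on UP builds both are empty, so the atomics above carry no barrier cost there. A hedged illustration of the effective SMP expansions and the instruction stream they produce around a reservation loop (the stringify_in_c()/LWSYNC plumbing and the lwsync-fixup section used by PPC_ACQUIRE_BARRIER are omitted; the assumption below is that LWSYNC resolves to the plain lwsync mnemonic):

```c
/* Effective SMP expansions, with the stringification plumbing stripped
 * (assumption: LWSYNC resolves to the "lwsync" mnemonic here).
 */
#define PPC_ATOMIC_ENTRY_BARRIER "\n lwsync \n"	/* entry: lightweight sync */
#define PPC_ATOMIC_EXIT_BARRIER  "\n sync \n"	/* exit: full sync (was an isync-based acquire) */

/* Resulting instruction stream for e.g. atomic_inc_return() on SMP:
 *
 *	lwsync			<- PPC_ATOMIC_ENTRY_BARRIER
 * 1:	lwarx	r9,0,r3
 *	addic	r9,r9,1
 *	stwcx.	r9,0,r3
 *	bne-	1b
 *	sync			<- PPC_ATOMIC_EXIT_BARRIER
 */
```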
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index bd6c401c0ee5..c48de98ba94e 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
| @@ -15,8 +15,8 @@ | |||
| 15 | #define DEFAULT_PRIORITY 5 | 15 | #define DEFAULT_PRIORITY 5 |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Mark IPIs as higher priority so we can take them inside interrupts that | 18 | * Mark IPIs as higher priority so we can take them inside interrupts |
| 19 | * arent marked IRQF_DISABLED | 19 | * FIXME: still true now? |
| 20 | */ | 20 | */ |
| 21 | #define IPI_PRIORITY 4 | 21 | #define IPI_PRIORITY 4 |
| 22 | 22 | ||
