Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--   arch/powerpc/include/asm/compat.h      |  2
-rw-r--r--   arch/powerpc/include/asm/fsldma.h      |  1
-rw-r--r--   arch/powerpc/include/asm/mmu-hash64.h  |  2
-rw-r--r--   arch/powerpc/include/asm/reg.h         |  9
-rw-r--r--   arch/powerpc/include/asm/rwsem.h       | 64
-rw-r--r--   arch/powerpc/include/asm/systbl.h      |  3
-rw-r--r--   arch/powerpc/include/asm/unistd.h      |  5
7 files changed, 55 insertions, 31 deletions
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 396d21a80058..a11d4eac4f97 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current->thread.regs;
 	unsigned long usp = regs->gpr[1];
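Note: the rename above makes room for a generic compat_alloc_user_space() wrapper to call this as an arch hook (presumably so the generic code can validate the returned range before handing it out). For orientation, here is a hedged sketch of the usual caller pattern, not taken from this patch -- struct foo, struct compat_foo and sys_foo() are hypothetical; only the allocator itself is real. A 32-bit compat syscall repacks its argument into native layout in scratch space carved out below the user stack pointer, which is exactly what this function returns.

#include <linux/compat.h>
#include <linux/uaccess.h>

struct foo {
	u64 value;			/* hypothetical native layout */
};

struct compat_foo {
	compat_u64 value;		/* hypothetical 32-bit layout */
};

long sys_foo(struct foo __user *arg);	/* hypothetical native syscall */

static long example_compat_sys_foo(struct compat_foo __user *ufoo32)
{
	struct compat_foo f32;
	struct foo __user *ufoo;

	if (copy_from_user(&f32, ufoo32, sizeof(f32)))
		return -EFAULT;

	/* Scratch area below the 32-bit task's stack pointer. */
	ufoo = compat_alloc_user_space(sizeof(*ufoo));
	if (put_user((u64)f32.value, &ufoo->value))
		return -EFAULT;

	return sys_foo(ufoo);
}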
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
index a67aeed17d40..debc5ed96d6e 100644
--- a/arch/powerpc/include/asm/fsldma.h
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 0e398cfee2c8..acac35d5b382 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -433,7 +433,7 @@ typedef struct {
  * with. However gcc is not clever enough to compute the
  * modulus (2^n-1) without a second multiply.
  */
-#define vsid_scrample(protovsid, size) \
+#define vsid_scramble(protovsid, size) \
 	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
 
 #else /* 1 */
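Note: this hunk only fixes a typo in the macro name; the comment it carries explains why a hand-tuned variant exists elsewhere: reducing modulo 2^n - 1 can be done by folding rather than dividing, since 2^n == 1 (mod 2^n - 1). The following standalone program illustrates that folding idea; it is an illustration only, not the kernel's exact instruction sequence, and assumes x < 2^(2n).

#include <stdio.h>

static unsigned long mod_2n_minus_1(unsigned long x, unsigned int n)
{
	unsigned long m = (1UL << n) - 1;

	x = (x & m) + (x >> n);		/* fold the high bits back down */
	x = (x & m) + (x >> n);		/* second fold absorbs the carry */
	if (x >= m)			/* result may still be m or m+1 */
		x -= m;
	return x;
}

int main(void)
{
	unsigned long x = 0x123456789abUL;

	/* Both lines should print the same value. */
	printf("%lu\n", mod_2n_minus_1(x, 24));
	printf("%lu\n", x % ((1UL << 24) - 1));
	return 0;
}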
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d8be016d2ede..ff0005eec7dd 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -951,7 +951,14 @@
 #ifdef CONFIG_PPC64
 
 extern void ppc64_runlatch_on(void);
-extern void ppc64_runlatch_off(void);
+extern void __ppc64_runlatch_off(void);
+
+#define ppc64_runlatch_off()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+				test_thread_flag(TIF_RUNLATCH))	\
+			__ppc64_runlatch_off();			\
+	} while (0)
 
 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
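Note: the macro above does the cheap tests (CPU feature, per-thread TIF_RUNLATCH flag) inline and only calls the out-of-line __ppc64_runlatch_off() when there is actually work to do. A hedged sketch of the kind of caller this serves -- an idle loop -- follows; the wrapper is purely illustrative and is not the kernel's actual idle code.

/* Illustrative idle-loop shape only. */
static void example_idle(void)
{
	while (!need_resched()) {
		/* Cheap inline checks now; the actual latch write happens
		 * only when the feature is present and the latch is set. */
		ppc64_runlatch_off();
		/* ... enter a low-power nap here ... */
	}
	ppc64_runlatch_on();
}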
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd9281ec37..8447d89fbe72 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -21,15 +21,20 @@
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-	/* XXX this should be able to be an atomic_t -- paulus */
-	signed int		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+	long			count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)				\
+{								\
+	RWSEM_UNLOCKED_VALUE,					\
+	__SPIN_LOCK_UNLOCKED((name).wait_lock),			\
+	LIST_HEAD_INIT((name).wait_list)			\
+	__RWSEM_DEP_MAP_INIT(name)				\
+}
 
 #define DECLARE_RWSEM(name)	\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
 		rwsem_down_read_failed(sem);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 		rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
 	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
+	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 		rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-				       (atomic_t *)(&sem->count)) < 0))
+	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+				 (atomic_long_t *)&sem->count) < 0))
 		rwsem_wake(sem);
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-	atomic_add(delta, (atomic_t *)(&sem->count));
+	atomic_long_add(delta, (atomic_long_t *)&sem->count);
 }
 
 /*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
+	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return (sem->count != 0);
+	return sem->count != 0;
 }
 
 #endif	/* __KERNEL__ */
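Note: with count widened to long, the 64-bit build gets a 32-bit active-holder field instead of 16 bits, and the waiting/write biases are re-derived from RWSEM_ACTIVE_MASK. The standalone userspace program below only restates the arithmetic of the constants in this diff (it is not kernel code and makes no claim about the slow-path bookkeeping in lib/rwsem.c): an uncontended down_write yields exactly RWSEM_ACTIVE_WRITE_BIAS, a contended one does not, and a final reader leaving while the waiting bias is applied trips the wake condition that __up_read() tests.

#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL	/* CONFIG_PPC64 values */
#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count;

	/* Uncontended writer: add_return yields exactly WRITE_BIAS,
	 * so __down_write_nested() skips the slow path. */
	count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
	printf("uncontended write fast path: %d\n",
	       count == RWSEM_ACTIVE_WRITE_BIAS);

	/* Writer arriving behind one active reader: the sum differs from
	 * WRITE_BIAS, which is what sends it to rwsem_down_write_failed(). */
	count = RWSEM_ACTIVE_BIAS + RWSEM_ACTIVE_WRITE_BIAS;
	printf("contended write fast path:   %d\n",
	       count == RWSEM_ACTIVE_WRITE_BIAS);

	/* One active reader with the waiting bias applied; when the reader
	 * drops out, count goes below -1 with no active bits set -- the
	 * condition __up_read() checks before calling rwsem_wake(). */
	count = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS;
	count -= RWSEM_ACTIVE_BIAS;
	printf("wake needed after up_read:   %d\n",
	       count < -1 && (count & RWSEM_ACTIVE_MASK) == 0);
	return 0;
}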
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a5ee345b6a5c..3d212669a130 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -326,3 +326,6 @@ SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f0a10266e7f7..597e6f9d094a 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -345,10 +345,13 @@
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322
+#define __NR_fanotify_init	323
+#define __NR_fanotify_mark	324
+#define __NR_prlimit64		325
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		323
+#define __NR_syscalls		326
 
 #define __NR__exit __NR_exit
 #define NR_syscalls __NR_syscalls
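Note: the numbers here must stay in lockstep with the positions added to systbl.h above, and __NR_syscalls is bumped to 326 to cover the three new entries. As a hedged userspace sketch (not part of the patch), prlimit64 can be exercised by number on powerpc before a libc wrapper exists; the local struct mirrors the kernel's two-u64 struct rlimit64 layout and RLIMIT_NOFILE's value of 7 is assumed from the generic headers.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Local copy of the kernel rlimit64 layout, to keep this self-contained. */
struct krlimit64 {
	uint64_t rlim_cur;
	uint64_t rlim_max;
};

#define NR_prlimit64_ppc	325	/* __NR_prlimit64 from this patch */
#define KRLIMIT_NOFILE		7	/* RLIMIT_NOFILE, assumed value */

int main(void)
{
	struct krlimit64 old;

	/* prlimit64(pid 0 = current task, resource, new_rlim, old_rlim) */
	if (syscall(NR_prlimit64_ppc, 0, KRLIMIT_NOFILE, NULL, &old) != 0) {
		perror("prlimit64");
		return 1;
	}
	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}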