Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/agp.h       |  4
-rw-r--r--  arch/powerpc/include/asm/device.h    |  3
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  |  4
-rw-r--r--  arch/powerpc/include/asm/qe.h        |  1
-rw-r--r--  arch/powerpc/include/asm/socket.h    |  3
-rw-r--r--  arch/powerpc/include/asm/spinlock.h  | 20
6 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
index 86455c4c31ee..416e12c2d505 100644
--- a/arch/powerpc/include/asm/agp.h
+++ b/arch/powerpc/include/asm/agp.h
@@ -8,10 +8,6 @@
 #define unmap_page_from_agp(page)
 #define flush_agp_cache() mb()
 
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order) \
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 67fcd7f89d99..9dade15d1ab4 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -33,4 +33,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
 	return ad->of_node;
 }
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_POWERPC_DEVICE_H */
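Note: the new (and for now empty) struct pdev_archdata gives the architecture a place to hang per-PCI-device data that generic code can carry around opaquely. A minimal sketch of the idea follows; the embedding struct and the dma_data field are illustrative assumptions, not part of this patch or of the generic PCI layout.

/* Illustration only: the field and the embedding struct are assumptions. */
struct pdev_archdata {
	void *dma_data;			/* hypothetical arch-private cookie */
};

/* Generic code can embed the container without knowing its contents: */
struct example_pci_dev {
	/* ... generic fields ... */
	struct pdev_archdata archdata;	/* arch-specific per-device data */
};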
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fddc3ed715fa..c9c930ed11d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,8 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE	(1UL << 31)
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
 struct kvm;
 struct kvm_run;
@@ -153,7 +154,6 @@ struct kvm_vcpu_arch {
 	u32 pid;
 	u32 swap_pid;
 
-	u32 pvr;
 	u32 ccr0;
 	u32 ccr1;
 	u32 dbcr0;
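Note: KVM_PAGES_PER_HPAGE changes from a single constant into a per-level macro alongside KVM_NR_PAGE_SIZES. A small self-contained sketch of how a caller can now iterate over the supported page-size levels; the macros are copied from the hunk above, but the loop is a made-up caller, not the real generic KVM code.

/* Illustration only: the iterating caller is an assumption. */
#include <stdio.h>

#define KVM_NR_PAGE_SIZES	1
#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)

int main(void)
{
	int level;

	/* With one supported size, this visits only level 1. */
	for (level = 1; level <= KVM_NR_PAGE_SIZES; level++)
		printf("level %d: KVM_PAGES_PER_HPAGE = %lu (placeholder; large pages unsupported)\n",
		       level, KVM_PAGES_PER_HPAGE(level));
	return 0;
}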
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 157c5ca581c8..f388f0ab193f 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -154,6 +154,7 @@ int qe_get_snum(void);
 void qe_put_snum(u8 snum);
 unsigned int qe_get_num_of_risc(void);
 unsigned int qe_get_num_of_snums(void);
+int qe_alive_during_sleep(void);
 
 /* we actually use cpm_muram implementation, define this for convenience */
 #define qe_muram_init cpm_muram_init
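Note: a hedged sketch of how a QE driver's suspend path might consult the new helper, assuming qe_alive_during_sleep() reports whether the QE keeps its state across sleep. The driver, its device argument, and the state-saving helper below are assumptions, not taken from this patch.

/* Illustration only: driver details are assumed. */
#include <linux/device.h>
#include <asm/qe.h>

static void example_save_controller_state(struct device *dev)
{
	/* hypothetical: stash registers the QE would lose while asleep */
}

static int example_qe_suspend(struct device *dev)
{
	/* If the QE loses state across sleep, save what resume will need. */
	if (!qe_alive_during_sleep())
		example_save_controller_state(dev);
	return 0;
}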
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 1e5cfad0e3f7..3ab8b3e6feb0 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -64,4 +64,7 @@
 #define SO_TIMESTAMPING		37
 #define SCM_TIMESTAMPING	SO_TIMESTAMPING
 
+#define SO_PROTOCOL		38
+#define SO_DOMAIN		39
+
 #endif /* _ASM_POWERPC_SOCKET_H */
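Note: SO_PROTOCOL and SO_DOMAIN are read-only options queried with getsockopt() at the SOL_SOCKET level. A minimal userspace sketch, not part of this patch, that asks the kernel for both on a freshly created socket:

/* Illustration only: queries the two new read-only socket options. */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int domain, protocol;
	socklen_t len = sizeof(int);

	if (fd < 0)
		return 1;
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) == 0)
		printf("domain   = %d (AF_INET is %d)\n", domain, AF_INET);
	len = sizeof(int);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) == 0)
		printf("protocol = %d\n", protocol);
	return 0;
}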
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return __spin_trylock(lock) == 0;
+	return arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__read_trylock(rw) > 0))
+		if (likely(arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__write_trylock(rw) == 0))
+		if (likely(arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
255 255
256static inline int __raw_read_trylock(raw_rwlock_t *rw) 256static inline int __raw_read_trylock(raw_rwlock_t *rw)
257{ 257{
258 return __read_trylock(rw) > 0; 258 return arch_read_trylock(rw) > 0;
259} 259}
260 260
261static inline int __raw_write_trylock(raw_rwlock_t *rw) 261static inline int __raw_write_trylock(raw_rwlock_t *rw)
262{ 262{
263 return __write_trylock(rw) == 0; 263 return arch_write_trylock(rw) == 0;
264} 264}
265 265
266static inline void __raw_read_unlock(raw_rwlock_t *rw) 266static inline void __raw_read_unlock(raw_rwlock_t *rw)
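Note: the spinlock.h hunks only rename the low-level helpers to arch_*_trylock, but the pattern they implement is: try the lock once, then spin reading at reduced priority until it looks free, then retry. A standalone C11 sketch of that shape, where generic atomics stand in for the lwarx/stwcx. sequences and the HMT_low()/HMT_medium() SMT priority hints used by the real powerpc code:

/* Illustrative shape only, not the powerpc implementation. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_uint slock; } sketch_spinlock_t;

static bool sketch_spin_trylock(sketch_spinlock_t *lock)
{
	unsigned int expected = 0;
	/* Succeeds only if the lock word was 0 (unlocked). */
	return atomic_compare_exchange_strong(&lock->slock, &expected, 1);
}

static void sketch_spin_lock(sketch_spinlock_t *lock)
{
	while (1) {
		if (sketch_spin_trylock(lock))
			break;
		/* Spin with plain loads (HMT_low() would go here) until the
		 * lock word looks free, then retry the atomic acquire. */
		do {
			/* cpu_relax()/HMT_low() equivalent omitted */
		} while (atomic_load(&lock->slock) != 0);
		/* HMT_medium() equivalent would go here */
	}
}

static void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	atomic_store(&lock->slock, 0);
}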