author		Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
committer	Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
commit		32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree		b1ce838a37044bb38dfc128e2116ca35630e629a /arch/m32r/include
parent		22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent		c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'arch/m32r/include')
-rw-r--r--	arch/m32r/include/asm/cacheflush.h	 3
-rw-r--r--	arch/m32r/include/asm/elf.h		 1
-rw-r--r--	arch/m32r/include/asm/io.h		 7
-rw-r--r--	arch/m32r/include/asm/socket.h		 2
-rw-r--r--	arch/m32r/include/asm/spinlock.h	48
-rw-r--r--	arch/m32r/include/asm/spinlock_types.h	 8
6 files changed, 40 insertions(+), 29 deletions(-)
diff --git a/arch/m32r/include/asm/cacheflush.h b/arch/m32r/include/asm/cacheflush.h
index 78587c958146..8e8e04516c39 100644
--- a/arch/m32r/include/asm/cacheflush.h
+++ b/arch/m32r/include/asm/cacheflush.h
@@ -12,6 +12,7 @@ extern void _flush_cache_copyback_all(void);
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -33,6 +34,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -46,6 +48,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
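The line added three times above (once per cache-configuration branch of the header) declares that m32r does not implement flush_dcache_page(): the macro is 0 and the flush is a no-op. Generic code can then compile per-page flush loops away entirely. A minimal sketch of the consumer side, modelled on the block-layer helper that motivated the macro; the function name here is hypothetical:

	/* Sketch: flush every data page of a request, but only on
	 * architectures that declare a real flush_dcache_page(). */
	#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
	static void example_flush_request_pages(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec *bvec;

		rq_for_each_segment(bvec, rq, iter)
			flush_dcache_page(bvec->bv_page);
	}
	#else
	static inline void example_flush_request_pages(struct request *rq) { }
	#endif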
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 0cc34c94bf2b..2f85412ef730 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t;
  */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->r0 = 0

-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE

 /*
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index d06933bd6318..4010f1fc5b65 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define __raw_writew writew
 #define __raw_writel writel

+#define ioread8 readb
+#define ioread16 readw
+#define ioread32 readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite32 writel
+
 #define mmiowb()

 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
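These defines give m32r the portable ioread*/iowrite* accessor family, which drivers use so the same code works whether a device is port- or memory-mapped; on m32r they simply alias the MMIO read*/write* helpers. An illustrative driver fragment (the device, register offsets, and function name are hypothetical):

	#include <linux/io.h>

	/* Hypothetical device: write a kick bit to CTRL, read back STATUS. */
	static u32 example_kick_and_read(void __iomem *base)
	{
		iowrite32(0x1, base + 0x04);	/* hypothetical CTRL register   */
		return ioread32(base + 0x00);	/* hypothetical STATUS register */
	}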
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h
index 3390a864f224..469787c30098 100644
--- a/arch/m32r/include/asm/socket.h
+++ b/arch/m32r/include/asm/socket.h
@@ -60,4 +60,6 @@
 #define SO_PROTOCOL	38
 #define SO_DOMAIN	39

+#define SO_RXQ_OVFL	40
+
 #endif	/* _ASM_M32R_SOCKET_H */
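SO_RXQ_OVFL (value 40, matching the other architectures) was new in this merge window: once enabled, the kernel attaches the cumulative count of datagrams dropped due to a full receive queue as ancillary data on received packets. A minimal userspace sketch of switching it on:

	#include <sys/socket.h>

	/* After this succeeds, recvmsg() can find a SOL_SOCKET/SO_RXQ_OVFL
	 * cmsg carrying a 32-bit drop counter alongside each datagram. */
	static int enable_rxq_ovfl(int fd)
	{
		int one = 1;

		return setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	}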
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
  * We make no fairness assumptions. They have a cost.
  */

-#define __raw_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { cpu_relax(); } while (arch_spin_is_locked(x))

 /**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# __raw_spin_trylock		\n\t"
+		"# arch_spin_trylock		\n\t"
 		"ldi	%1, #0;			\n\t"
 		"mvfc	%2, psw;		\n\t"
 		"clrpsw	#0x40 -> nop;		\n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (oldval > 0);
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;

@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# __raw_spin_lock		\n\t"
+		"# arch_spin_lock		\n\t"
 		".fillinsn			\n"
 		"1:				\n\t"
 		"mvfc	%1, psw;		\n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	mb();
 	lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
+#define arch_read_can_lock(x)	((int)(x)->lock > 0)

 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;

@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;

@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;

@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;

@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	);
 }

-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t*)lock;
 	if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }

-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif	/* _ASM_M32R_SPINLOCK_H */
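Everything in this file is a mechanical rename: the locking core of this merge window took over the raw_* namespace, so the architecture hooks move from __raw_*/raw_spinlock_t to arch_*/arch_spinlock_t with no behavioural change. For orientation, a C rendering of what the asm in arch_spin_trylock() implements, following the pseudocode in the file's own comments; this is illustrative only, since plain C cannot make the two steps atomic:

	static inline int arch_spin_trylock_sketch(arch_spinlock_t *lock)
	{
		int oldval;

		/* The asm performs these two steps as one atomic unit,
		 * using m32r's LOCK/UNLOCK instructions with interrupts
		 * masked via clrpsw.				       */
		oldval = lock->slock;	/* slock: 1 = unlocked, <= 0 = locked */
		lock->slock = 0;	/* claim the lock unconditionally     */

		return oldval > 0;	/* 1 = acquired, 0 = was already held */
	}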
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..92e27672661f 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@

 typedef struct {
 	volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 1 }

 typedef struct {
 	volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

 #define RW_LOCK_BIAS			0x01000000
 #define RW_LOCK_BIAS_STR		"0x01000000"

-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

 #endif /* _ASM_M32R_SPINLOCK_TYPES_H */
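The type renames complete the picture: raw_spinlock_t is no longer the architecture's type but a core type that embeds it. A simplified sketch of the resulting layering, modelled on include/linux/spinlock_types.h of the same era (debug and lockdep fields elided):

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;	/* the m32r struct defined above */
	} raw_spinlock_t;

	typedef struct spinlock {
		struct raw_spinlock rlock;	/* what spin_lock() operates on;
						 * freeing the raw_* name lets
						 * RT substitute a sleeping lock */
	} spinlock_t;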