Diffstat (limited to 'arch/m32r/include')
 -rw-r--r--  arch/m32r/include/asm/atomic.h       | 10
 -rw-r--r--  arch/m32r/include/asm/delay.h        | 27
 -rw-r--r--  arch/m32r/include/asm/mmu_context.h  |  2
 -rw-r--r--  arch/m32r/include/asm/processor.h    |  1
 -rw-r--r--  arch/m32r/include/asm/ptrace.h       |  2
 -rw-r--r--  arch/m32r/include/asm/spinlock.h     |  2
 6 files changed, 7 insertions(+), 37 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index d44a51e5271..1e7f29fb21f 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -239,15 +239,15 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -259,10 +259,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 {
@@ -314,5 +313,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
-#include <asm-generic/atomic-long.h>
 #endif /* _ASM_M32R_ATOMIC_H */
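
The atomic.h hunks above change what the primitive returns: __atomic_add_unless() now hands back the old value of the counter rather than a success flag. A small stand-alone sketch (user-space C built on the GCC/Clang __atomic builtins; an illustration of the semantics, not the kernel implementation) of why the old value is enough for callers that only want a yes/no answer:

/*
 * Mirrors the cmpxchg loop in the hunk above: try to add a unless the
 * current value equals u, and return whatever value was observed.
 */
#include <stdio.h>

static int add_unless_old(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			break;
		/* on failure the builtin refreshes c with the current value */
		if (__atomic_compare_exchange_n(v, &c, c + a, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			break;
	}
	return c;	/* old value, never a boolean */
}

int main(void)
{
	int counter = 3;
	int old = add_unless_old(&counter, 1, 0);

	/* prints "added=1 counter=4": counter was not 0, so 1 was added */
	printf("added=%d counter=%d\n", old != 0, counter);
	return 0;
}

Boolean variants such as atomic_inc_not_zero() can then be recovered generically as "old value != u", which is why the m32r-local #define could be dropped from this header.
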
diff --git a/arch/m32r/include/asm/delay.h b/arch/m32r/include/asm/delay.h
index 9dd9e999ea6..9670e127b7b 100644
--- a/arch/m32r/include/asm/delay.h
+++ b/arch/m32r/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef _ASM_M32R_DELAY_H
-#define _ASM_M32R_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/m32r/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
-	__udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-	__ndelay(n))
-
-#endif /* _ASM_M32R_DELAY_H */
+#include <asm-generic/delay.h>
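
The delay.h change drops the m32r-private copy in favour of the generic header; the declarations and the udelay()/ndelay() constant-argument checks removed above are assumed to be provided, essentially unchanged, by <asm-generic/delay.h>. A hypothetical driver snippet (example_reset_pulse() is made up for illustration) showing that callers are unaffected:

#include <linux/delay.h>

static void example_reset_pulse(void)
{
	udelay(10);	/* constant arg: folds to __const_udelay(10 * 0x10c7ul) */
	ndelay(500);	/* constant arg: folds to __const_udelay(500 * 5ul) */
}

Non-constant arguments still go through __udelay()/__ndelay() at run time, and constants above 20000 are still expected to fail the build via __bad_udelay()/__bad_ndelay().
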
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a70a3df3363..a979a419816 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -11,7 +11,7 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 #include <asm/tlbflush.h>
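
This hunk (and the matching one in asm/spinlock.h at the end of this diff) swaps the raw architecture header for the <linux/atomic.h> wrapper. A rough sketch of the assumed layering, not part of this diff, showing why C code is pointed at the wrapper:

/* assumed shape of <linux/atomic.h>: arch primitives first, then the
 * generic helpers layered on top of __atomic_add_unless() and friends */
#include <asm/atomic.h>

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

Including <asm/atomic.h> directly would bypass those generic helpers, so in-kernel users are switched to <linux/atomic.h>.
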
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 8397c249989..e1f46d75746 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -106,7 +106,6 @@ struct thread_struct {
 
 #define start_thread(regs, new_pc, new_spu)				\
 	do {								\
-		set_fs(USER_DS);					\
 		regs->psw = (regs->psw | USERPS_BPSW) & 0x0000FFFFUL;	\
 		regs->bpc = new_pc;					\
 		regs->spu = new_spu;					\
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index 840a1231ede..527527584dd 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -138,8 +138,6 @@ extern void init_debug_traps(struct task_struct *);
 #define instruction_pointer(regs) ((regs)->bpc)
 #define profile_pc(regs) instruction_pointer(regs)
 
-extern void show_regs(struct pt_regs *);
-
 extern void withdraw_debug_trap(struct pt_regs *regs);
 
 #define task_pt_regs(task) \
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 179a06489b1..b0ea2f26da3 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -10,7 +10,7 @@
  */
 
 #include <linux/compiler.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/page.h>
 
 /*