 include/asm-m68knommu/atomic.h      |  4 ++--
 include/asm-m68knommu/delay.h       |  4 ++--
 include/asm-m68knommu/io.h          |  8 ++++----
 include/asm-m68knommu/mcfwdebug.h   |  2 +-
 include/asm-m68knommu/mmu_context.h |  4 ++--
 include/asm-m68knommu/processor.h   |  4 ++--
 include/asm-m68knommu/semaphore.h   | 10 +++++-----
 include/asm-m68knommu/tlbflush.h    |  4 ++--
 8 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index b1957fba083b..a83631ed8c8f 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -100,7 +100,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long temp, flags;
 
@@ -115,7 +115,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long temp, flags;
 
diff --git a/include/asm-m68knommu/delay.h b/include/asm-m68knommu/delay.h
index e3a976254672..04a20fd051cf 100644
--- a/include/asm-m68knommu/delay.h
+++ b/include/asm-m68knommu/delay.h
@@ -8,7 +8,7 @@
 
 #include <asm/param.h>
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 #if defined(CONFIG_COLDFIRE)
 	/* The coldfire runs this loop at significantly different speeds
@@ -48,7 +48,7 @@ extern __inline__ void __delay(unsigned long loops)
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void _udelay(unsigned long usecs)
+static inline void _udelay(unsigned long usecs)
 {
 #if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
 	defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
diff --git a/include/asm-m68knommu/io.h b/include/asm-m68knommu/io.h
index 30fade4149b8..e08f2ee4b4a2 100644
--- a/include/asm-m68knommu/io.h
+++ b/include/asm-m68knommu/io.h
@@ -147,19 +147,19 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
 extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
 extern void __iounmap(void *addr, unsigned long size);
 
-extern inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void *ioremap(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-extern inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-extern inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-extern inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
diff --git a/include/asm-m68knommu/mcfwdebug.h b/include/asm-m68knommu/mcfwdebug.h
index c425dd568155..6ceae103596b 100644
--- a/include/asm-m68knommu/mcfwdebug.h
+++ b/include/asm-m68knommu/mcfwdebug.h
@@ -90,7 +90,7 @@
  * that the debug module instructions (2 longs) must be long word aligned and
  * some pointer fiddling is performed to ensure this.
  */
-extern inline void wdebug(int reg, unsigned long data) {
+static inline void wdebug(int reg, unsigned long data) {
 	unsigned short dbg_spc[6];
 	unsigned short *dbg;
 
diff --git a/include/asm-m68knommu/mmu_context.h b/include/asm-m68knommu/mmu_context.h
index 9bc0fd49b8aa..1e080eca9ca8 100644
--- a/include/asm-m68knommu/mmu_context.h
+++ b/include/asm-m68knommu/mmu_context.h
@@ -10,7 +10,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	// mm->context = virt_to_phys(mm->pgd);
@@ -25,7 +25,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
 			       struct mm_struct *next_mm)
 {
 }
diff --git a/include/asm-m68knommu/processor.h b/include/asm-m68knommu/processor.h
index 85a054e758b1..ba393b1a023b 100644
--- a/include/asm-m68knommu/processor.h
+++ b/include/asm-m68knommu/processor.h
@@ -21,7 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/current.h>
 
-extern inline unsigned long rdusp(void)
+static inline unsigned long rdusp(void)
 {
 #ifdef CONFIG_COLDFIRE
 	extern unsigned int sw_usp;
@@ -33,7 +33,7 @@ extern inline unsigned long rdusp(void)
 #endif
 }
 
-extern inline void wrusp(unsigned long usp)
+static inline void wrusp(unsigned long usp)
 {
 #ifdef CONFIG_COLDFIRE
 	extern unsigned int sw_usp;
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 17aee15906a6..5cc1fdd86f50 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -41,7 +41,7 @@ struct semaphore {
 #define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
 
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
 }
@@ -73,7 +73,7 @@ extern spinlock_t semaphore_wake_lock;
73 * "down_failed" is a special asm handler that calls the C 73 * "down_failed" is a special asm handler that calls the C
74 * routine that actually waits. See arch/m68k/lib/semaphore.S 74 * routine that actually waits. See arch/m68k/lib/semaphore.S
75 */ 75 */
76extern inline void down(struct semaphore * sem) 76static inline void down(struct semaphore * sem)
77{ 77{
78 might_sleep(); 78 might_sleep();
79 __asm__ __volatile__( 79 __asm__ __volatile__(
@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem)
88 : "cc", "%a0", "%a1", "memory"); 88 : "cc", "%a0", "%a1", "memory");
89} 89}
90 90
91extern inline int down_interruptible(struct semaphore * sem) 91static inline int down_interruptible(struct semaphore * sem)
92{ 92{
93 int ret; 93 int ret;
94 94
@@ -107,7 +107,7 @@ extern inline int down_interruptible(struct semaphore * sem)
 	return(ret);
 }
 
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	register struct semaphore *sem1 __asm__ ("%a1") = sem;
 	register int result __asm__ ("%d0");
@@ -135,7 +135,7 @@ extern inline int down_trylock(struct semaphore * sem)
  * The default case (no contention) will result in NO
  * jumps for both down() and up().
  */
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 	__asm__ __volatile__(
 		"| atomic up operation\n\t"
diff --git a/include/asm-m68knommu/tlbflush.h b/include/asm-m68knommu/tlbflush.h
index bf7004e1afe0..de858db28b00 100644
--- a/include/asm-m68knommu/tlbflush.h
+++ b/include/asm-m68knommu/tlbflush.h
@@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm,
 	BUG();
 }
 
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
 {
 	BUG();
 }
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 	BUG();
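
For reference, a minimal standalone sketch (not part of the patch; the file and function names are hypothetical) of why this conversion matters under the GNU89 inline rules gcc applied to the kernel at the time: an "extern inline" definition provides only an inline body and no out-of-line symbol, so any call the compiler declines to inline (for example at -O0) becomes an undefined reference at link time, whereas "static inline" lets gcc emit a file-local body whenever a real call is needed.

/* gnu89-inline-demo.c -- hypothetical example, assuming gcc's gnu89
 * inline semantics (e.g. gcc -O0 -fgnu89-inline gnu89-inline-demo.c).
 */

/* No out-of-line body is emitted for this definition; if the call in
 * main() is not inlined, linking fails with an undefined reference
 * to bump_extern. */
extern inline int bump_extern(int x)
{
	return x + 1;
}

/* gcc emits a file-local copy on demand, so this always links. */
static inline int bump_static(int x)
{
	return x + 1;
}

int main(void)
{
	return bump_extern(1) + bump_static(2);
}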