author     Paul Mundt <lethal@linux-sh.org>    2006-09-26 23:31:01 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2006-09-26 23:31:01 -0400
commit     e4c2cfee5d5cf3e4c16b423be23551aeddf2717b
tree       656b3feabfb595448b04f4111ea82f6e395d1e0a /include
parent     a56d276c05a80ce727902076a3b4c6247705e2df
sh: Various cosmetic cleanups.
We had quite a bit of whitespace damage; clean most of it up.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Arthur Othieno <a.othieno@bluewin.ch>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/atomic.h   |  1 +
-rw-r--r--  include/asm-sh/bitops.h   | 16 ++++++++--------
-rw-r--r--  include/asm-sh/checksum.h |  2 ++
-rw-r--r--  include/asm-sh/fixmap.h   |  2 +-
-rw-r--r--  include/asm-sh/watchdog.h |  3 ---
5 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index fb627de217f2..049eb2dda6b6 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -14,6 +14,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
 
+#include <linux/compiler.h>
 #include <asm/system.h>
 
 /*
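For reference, the atomic_read()/atomic_set() macros visible in the context lines are plain accesses of the volatile counter. A standalone sketch of their behaviour — the typedef and macros copied from the hunk, the main() scaffolding added here for illustration:

#include <stdio.h>

/* Copied from the hunk above; userspace scaffolding is illustrative only. */
typedef struct { volatile int counter; } atomic_t;

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

int main(void)
{
	atomic_t refcount;

	atomic_set(&refcount, 1);                /* plain volatile store */
	printf("%d\n", atomic_read(&refcount));  /* plain volatile load: prints 1 */
	return 0;
}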
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index e34f82508568..1c16792cee1d 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -6,7 +6,7 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -24,7 +24,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -37,7 +37,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -50,7 +50,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -66,7 +66,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -82,7 +82,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -100,7 +100,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 
 #include <asm-generic/bitops/non-atomic.h>
 
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
 
@@ -120,7 +120,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
	unsigned long result;
 
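The bodies elided by these hunks all follow the same uniprocessor pattern: locate the word holding the bit, build a mask, then do the read-modify-write with interrupts disabled. A standalone sketch of that pattern, with local_irq_save()/local_irq_restore() reduced to no-op stubs so it compiles outside the kernel (on SH the real macros disable and re-enable interrupts around the update):

#include <stdio.h>

/* No-op stand-ins for the kernel macros, purely so this compiles standalone. */
#define local_irq_save(flags)    ((void)((flags) = 0))
#define local_irq_restore(flags) ((void)(flags))

static inline void set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;            /* advance to the word holding bit nr */
	mask = 1 << (nr & 0x1f); /* bit position within that word */
	local_irq_save(flags);   /* make the RMW atomic w.r.t. this CPU */
	*a |= mask;
	local_irq_restore(flags);
}

int main(void)
{
	unsigned int bitmap[2] = { 0, 0 };

	set_bit(37, bitmap);         /* sets bit 5 of word 1 */
	printf("%#x\n", bitmap[1]);  /* prints 0x20 */
	return 0;
}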
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index fa03b30c4269..08168afe6746 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -159,6 +159,7 @@ static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
+#ifdef CONFIG_IPV6
 static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 						     struct in6_addr *daddr,
 						     __u32 len,
@@ -194,6 +195,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 
 	return csum_fold(sum);
 }
+#endif
 
 /*
  * Copy and checksum to user
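The new #ifdef guard compiles csum_ipv6_magic() only when IPv6 support (and struct in6_addr) is configured in. The csum_fold(sum) call that ends the function folds the 32-bit partial checksum into 16 bits and complements it; the SH version is assembly, but a portable C equivalent of that folding step (the name kept from the diff, the implementation assumed here) reads:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement partial sum into 16 bits, then invert. */
static inline uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16); /* fold the high half back in */
	sum = (sum & 0xffff) + (sum >> 16); /* fold any carry that produced */
	return (uint16_t)~sum;
}

int main(void)
{
	printf("%#x\n", csum_fold(0x1fffe)); /* 0x1fffe folds to 0xffff, prints 0 */
	return 0;
}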
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 412bccaa07e6..458e9fa59545 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -25,7 +25,7 @@
  * addresses. The point is to have a constant address at
  * compile time, but to set the physical address only
  * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
+ * from the end of P3 backwards.
  * Also this lets us do fail-safe vmalloc(), we
  * can guarantee that these special addresses and
  * vmalloc()-ed addresses never overlap.
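The comment being fixed describes the allocation direction: fixmap slots are handed out downwards from the top of the region (now the end of the P3 area rather than a hard-coded 0xfffff000). A sketch of the usual index-to-address mapping behind that scheme, with illustrative constants rather than the real SH values:

#include <stdio.h>

/* Illustrative values only; the real SH constants differ. */
#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xe0000000UL /* hypothetical end of P3 */

/* Slot 0 sits at the top; higher indices grow downwards, one page apart. */
#define __fix_to_virt(idx)  (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

int main(void)
{
	printf("slot 0: %#lx\n", __fix_to_virt(0)); /* 0xe0000000 */
	printf("slot 3: %#lx\n", __fix_to_virt(3)); /* 0xdffffd000's worth lower: 0xdfffd000 */
	return 0;
}

Because each index resolves to a compile-time constant address, callers get fixed virtual addresses without knowing the physical mapping, which is set up during boot.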
diff --git a/include/asm-sh/watchdog.h b/include/asm-sh/watchdog.h
index 09ca41972a11..d19ea62ef8c6 100644
--- a/include/asm-sh/watchdog.h
+++ b/include/asm-sh/watchdog.h
@@ -62,7 +62,6 @@
 
 /**
  * sh_wdt_read_cnt - Read from Counter
- *
  * Reads back the WTCNT value.
  */
 static inline __u8 sh_wdt_read_cnt(void)
@@ -72,7 +71,6 @@ static inline __u8 sh_wdt_read_cnt(void)
 
 /**
  * sh_wdt_write_cnt - Write to Counter
- *
  * @val: Value to write
 *
 * Writes the given value @val to the lower byte of the timer counter.
@@ -95,7 +93,6 @@ static inline __u8 sh_wdt_read_csr(void)
 
 /**
  * sh_wdt_write_csr - Write to Control/Status Register
- *
 * @val: Value to write
 *
 * Writes the given value @val to the lower byte of the control/status
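The kernel-doc being trimmed here documents simple watchdog register accessors: reads are single-byte MMIO loads, while SH WDT counter writes are 16-bit stores carrying a write-enable key in the high byte. A rough sketch of that accessor pattern follows; the register address, the 0x5a key, and the ctrl_inb()/ctrl_outw() stubs are assumptions for illustration, not taken from this diff:

#include <stdint.h>

#define WTCNT       0xffc00008UL /* hypothetical counter register address */
#define WTCNT_HIGH  0x5a         /* assumed write-enable key, high byte */

static inline uint8_t ctrl_inb(unsigned long addr)  /* stub MMIO byte read */
{
	return *(volatile uint8_t *)addr;
}

static inline void ctrl_outw(uint16_t v, unsigned long addr) /* stub MMIO word write */
{
	*(volatile uint16_t *)addr = v;
}

static inline uint8_t sh_wdt_read_cnt(void)
{
	return ctrl_inb(WTCNT); /* reads back the WTCNT value */
}

static inline void sh_wdt_write_cnt(uint8_t val)
{
	/* 16-bit write: key in the high byte, counter value in the low byte */
	ctrl_outw((uint16_t)((WTCNT_HIGH << 8) | val), WTCNT);
}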