 include/asm-generic/iomap.h      | 32 ++++++++++++++++----------------
 include/asm-generic/mutex-dec.h  |  6 +++---
 include/asm-generic/mutex-xchg.h |  6 +++---
 3 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index cde592fca441..67dc84cd1343 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -25,17 +25,17 @@
  * in the low address range. Architectures for which this is not
  * true can't use this generic implementation.
  */
-extern unsigned int fastcall ioread8(void __iomem *);
-extern unsigned int fastcall ioread16(void __iomem *);
-extern unsigned int fastcall ioread16be(void __iomem *);
-extern unsigned int fastcall ioread32(void __iomem *);
-extern unsigned int fastcall ioread32be(void __iomem *);
+extern unsigned int ioread8(void __iomem *);
+extern unsigned int ioread16(void __iomem *);
+extern unsigned int ioread16be(void __iomem *);
+extern unsigned int ioread32(void __iomem *);
+extern unsigned int ioread32be(void __iomem *);
 
-extern void fastcall iowrite8(u8, void __iomem *);
-extern void fastcall iowrite16(u16, void __iomem *);
-extern void fastcall iowrite16be(u16, void __iomem *);
-extern void fastcall iowrite32(u32, void __iomem *);
-extern void fastcall iowrite32be(u32, void __iomem *);
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
 
 /*
  * "string" versions of the above. Note that they
@@ -48,13 +48,13 @@ extern void fastcall iowrite32be(u32, void __iomem *);
  * memory across multiple ports, use "memcpy_toio()"
  * and friends.
  */
-extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
 
-extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
-extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
-extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
 
 /* Create a virtual mapping cookie for an IO port range */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
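Note (not part of the patch): removing the fastcall annotation does not change how these accessors are used. A minimal usage sketch, assuming a hypothetical driver with made-up EXAMPLE_PORT_BASE and EXAMPLE_PORT_COUNT constants:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_PORT_BASE  0x300	/* hypothetical I/O port base */
#define EXAMPLE_PORT_COUNT 8		/* hypothetical number of ports */

static int example_touch_registers(void)
{
	/* Map the port range to get an __iomem cookie for ioread/iowrite. */
	void __iomem *regs = ioport_map(EXAMPLE_PORT_BASE, EXAMPLE_PORT_COUNT);
	u32 status;

	if (!regs)
		return -ENOMEM;

	status = ioread32(regs);	/* 32-bit read from the mapped port */
	iowrite32(status | 0x1, regs);	/* write it back with bit 0 set */

	ioport_unmap(regs);
	return 0;
}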
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 0134151656af..ed108be6743f 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -18,7 +18,7 @@
  * 1 even when the "1" assertion wasn't true.
  */
 static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
  * or anything the slow path function returns.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
@@ -61,7 +61,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *
  * to return 0 otherwise.
  */
 static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
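Note (not part of the patch): these helpers are only the architecture-generic fast path; the caller supplies the slow path. A rough sketch of how they are wired up, with my_lock_slowpath and my_unlock_slowpath as hypothetical stand-ins for the real handlers in kernel/mutex.c:

/* Sketch only: the slow-path bodies are placeholders. */
static void my_lock_slowpath(atomic_t *count)
{
	/* contended case: queue up and sleep until the owner releases */
}

static void my_unlock_slowpath(atomic_t *count)
{
	/* waiters present: wake one of them up */
}

static void example_lock_unlock(atomic_t *count)
{
	/*
	 * count is 1 when unlocked; the decrement wins the lock on the
	 * uncontended path, otherwise the slow path is invoked.
	 */
	__mutex_fastpath_lock(count, my_lock_slowpath);
	/* ... critical section ... */
	__mutex_fastpath_unlock(count, my_unlock_slowpath);
}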
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 6a7e8c141b53..7b9cd2cbfebe 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -23,7 +23,7 @@
  * even when the "1" assertion wasn't true.
  */
 static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		fail_fn(count);
@@ -42,7 +42,7 @@ __mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
  * or anything the slow path function returns
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
@@ -65,7 +65,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *
  * to return 0 otherwise.
  */
 static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
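Note (not part of the patch): the xchg-based variant exposes the same interface as the decrement-based one above, and the retval flavour exists for slow paths that can fail, per the comment "or anything the slow path function returns". A sketch, with my_lock_interruptible_slowpath as a hypothetical stand-in:

/* Sketch only: the slow path returns 0 on success or a negative errno. */
static int my_lock_interruptible_slowpath(atomic_t *count)
{
	/* sleep until the lock is free or a signal arrives */
	return 0;
}

static int example_lock_interruptible(atomic_t *count)
{
	/*
	 * Returns 0 when atomic_xchg() wins the lock on the fast path,
	 * otherwise whatever the slow path returns.
	 */
	return __mutex_fastpath_lock_retval(count, my_lock_interruptible_slowpath);
}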