diff options
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/atomic.h | 165 | ||||
-rw-r--r-- | include/asm-generic/io.h | 300 |
2 files changed, 465 insertions, 0 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h new file mode 100644 index 000000000000..c99c64dc5f3d --- /dev/null +++ b/include/asm-generic/atomic.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Generic C implementation of atomic counter operations | ||
3 | * Originally implemented for MN10300. | ||
4 | * | ||
5 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
6 | * Written by David Howells (dhowells@redhat.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public Licence | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the Licence, or (at your option) any later version. | ||
12 | */ | ||
13 | #ifndef __ASM_GENERIC_ATOMIC_H | ||
14 | #define __ASM_GENERIC_ATOMIC_H | ||
15 | |||
16 | #ifdef CONFIG_SMP | ||
17 | #error not SMP safe | ||
18 | #endif | ||
19 | |||
20 | /* | ||
21 | * Atomic operations that C can't guarantee us. Useful for | ||
22 | * resource counting etc.. | ||
23 | */ | ||
24 | |||
25 | #define ATOMIC_INIT(i) { (i) } | ||
26 | |||
27 | #ifdef __KERNEL__ | ||
28 | |||
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * The cast through a volatile pointer forces the compiler to reload
 * the counter on every use rather than caching a previously read
 * value in a register (important for busy-wait loops on the counter).
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
37 | |||
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * A plain store is sufficient here because this header refuses to
 * build on SMP (see the CONFIG_SMP #error above).
 */
#define atomic_set(v, i) (((v)->counter) = (i))
47 | |||
48 | #include <asm/system.h> | ||
49 | |||
50 | /** | ||
51 | * atomic_add_return - add integer to atomic variable | ||
52 | * @i: integer value to add | ||
53 | * @v: pointer of type atomic_t | ||
54 | * | ||
55 | * Atomically adds @i to @v and returns the result | ||
56 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
57 | */ | ||
58 | static inline int atomic_add_return(int i, atomic_t *v) | ||
59 | { | ||
60 | unsigned long flags; | ||
61 | int temp; | ||
62 | |||
63 | local_irq_save(flags); | ||
64 | temp = v->counter; | ||
65 | temp += i; | ||
66 | v->counter = temp; | ||
67 | local_irq_restore(flags); | ||
68 | |||
69 | return temp; | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * atomic_sub_return - subtract integer from atomic variable | ||
74 | * @i: integer value to subtract | ||
75 | * @v: pointer of type atomic_t | ||
76 | * | ||
77 | * Atomically subtracts @i from @v and returns the result | ||
78 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
79 | */ | ||
80 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
81 | { | ||
82 | unsigned long flags; | ||
83 | int temp; | ||
84 | |||
85 | local_irq_save(flags); | ||
86 | temp = v->counter; | ||
87 | temp -= i; | ||
88 | v->counter = temp; | ||
89 | local_irq_restore(flags); | ||
90 | |||
91 | return temp; | ||
92 | } | ||
93 | |||
/**
 * atomic_add_negative - add and test if the result is negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns non-zero if the new value is
 * negative, zero otherwise.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

/* Atomically add @i to @v, discarding the result. */
static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

/* Atomically subtract @i from @v, discarding the result. */
static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

/* Atomically increment @v by 1. */
static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

/* Atomically decrement @v by 1. */
static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}
118 | |||
/* Decrement/increment by one and return the new value. */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/* Arithmetic combined with a test of the new value against zero. */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)

/*
 * atomic_add_unless - add @a to @v unless @v currently equals @u.
 * Implemented as a cmpxchg retry loop inside a GCC statement
 * expression so it can be used as an rvalue; evaluates to non-zero
 * if the add was performed (i.e. @v did not equal @u).
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})

/* Increment @v unless it is zero; non-zero result means it was incremented. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
136 | |||
/*
 * atomic_clear_mask - clear the bits in @mask from the word at @addr.
 *
 * The read-modify-write is performed with local interrupts disabled so
 * an interrupt handler cannot tear it (this header is !SMP only).
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	*addr &= ~mask;
	local_irq_restore(irq_state);
}
146 | |||
/* Exchange / compare-and-exchange on the underlying counter word. */
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))

/* CPU-local cmpxchg variants, backed by the generic helpers. */
#define cmpxchg_local(ptr, o, n)				        \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

/*
 * Assume that atomic operations are already serializing, so the
 * before/after hooks only need to be compiler barriers.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
161 | |||
162 | #include <asm-generic/atomic-long.h> | ||
163 | |||
164 | #endif /* __KERNEL__ */ | ||
165 | #endif /* __ASM_GENERIC_ATOMIC_H */ | ||
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h new file mode 100644 index 000000000000..bcee6365dca0 --- /dev/null +++ b/include/asm-generic/io.h | |||
@@ -0,0 +1,300 @@ | |||
1 | /* Generic I/O port emulation, based on MN10300 code | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #ifndef __ASM_GENERIC_IO_H | ||
12 | #define __ASM_GENERIC_IO_H | ||
13 | |||
14 | #include <asm/page.h> /* I/O is all done through memory accesses */ | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <linux/types.h> | ||
17 | |||
18 | #ifdef CONFIG_GENERIC_IOMAP | ||
19 | #include <asm-generic/iomap.h> | ||
20 | #endif | ||
21 | |||
22 | #define mmiowb() do {} while (0) | ||
23 | |||
24 | /*****************************************************************************/ | ||
25 | /* | ||
26 | * readX/writeX() are used to access memory mapped devices. On some | ||
27 | * architectures the memory mapped IO stuff needs to be accessed | ||
28 | * differently. On the simple architectures, we just read/write the | ||
29 | * memory location directly. | ||
30 | */ | ||
31 | static inline u8 __raw_readb(const volatile void __iomem *addr) | ||
32 | { | ||
33 | return *(const volatile u8 __force *) addr; | ||
34 | } | ||
35 | |||
/* Read one 16-bit word from MMIO; no barriers, no byte-swapping. */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *) addr;
}

/* Read one 32-bit word from MMIO; no barriers, no byte-swapping. */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *) addr;
}

/*
 * A single byte needs no swapping, so readb aliases the raw accessor
 * directly; readw/readl convert from little-endian bus order to CPU
 * order.
 */
#define readb __raw_readb
#define readw(addr) __le16_to_cpu(__raw_readw(addr))
#define readl(addr) __le32_to_cpu(__raw_readl(addr))
49 | |||
/* Write one byte to MMIO; no barriers, no byte-swapping. */
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	*(volatile u8 __force *) addr = b;
}

/* Write one 16-bit word to MMIO; no barriers, no byte-swapping. */
static inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	*(volatile u16 __force *) addr = b;
}

/* Write one 32-bit word to MMIO; no barriers, no byte-swapping. */
static inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	*(volatile u32 __force *) addr = b;
}

/*
 * A single byte needs no swapping, so writeb aliases the raw accessor
 * directly; writew/writel convert from CPU order to little-endian bus
 * order.
 */
#define writeb __raw_writeb
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
68 | |||
#ifdef CONFIG_64BIT
/* 64-bit accessors, only provided on 64-bit architectures. */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *) addr;
}
#define readq(addr) __le64_to_cpu(__raw_readq(addr))

static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	*(volatile u64 __force *) addr = b;
}
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
82 | |||
/*****************************************************************************/
/*
 * traditional input/output functions
 *
 * In this generic emulation the I/O-port space is just part of the
 * memory-mapped address space: the port number is used directly as an
 * MMIO address, so inX/outX simply wrap readX/writeX.
 */

static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
	writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
	writel(b, (volatile void __iomem *) addr);
}
117 | |||
/* The "pausing" _p variants have no extra delay in this generic version. */
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
124 | |||
125 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
126 | { | ||
127 | if (count) { | ||
128 | u8 *buf = buffer; | ||
129 | do { | ||
130 | u8 x = inb(addr); | ||
131 | *buf++ = x; | ||
132 | } while (--count); | ||
133 | } | ||
134 | } | ||
135 | |||
/* Read @count 16-bit words from port @addr into @buffer. */
static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = inw(addr);
			*buf++ = x;
		} while (--count);
	}
}

/* Read @count 32-bit words from port @addr into @buffer. */
static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = inl(addr);
			*buf++ = x;
		} while (--count);
	}
}
157 | |||
/* Write @count bytes from @buffer to port @addr. */
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			outb(*buf++, addr);
		} while (--count);
	}
}

/* Write @count 16-bit words from @buffer to port @addr. */
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			outw(*buf++, addr);
		} while (--count);
	}
}

/* Write @count 32-bit words from @buffer to port @addr. */
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u32 *buf = buffer;
		do {
			outl(*buf++, addr);
		} while (--count);
	}
}
187 | |||
#ifndef CONFIG_GENERIC_IOMAP
/*
 * Without CONFIG_GENERIC_IOMAP an ioread/iowrite cookie is a plain
 * MMIO address, so these map straight onto the readX/writeX and
 * string accessors above.
 */
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread32(addr) readl(addr)

#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite32(v, addr) writel((v), (addr))

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
211 | |||
212 | |||
213 | #define IO_SPACE_LIMIT 0xffffffff | ||
214 | |||
215 | #ifdef __KERNEL__ | ||
216 | |||
217 | #include <linux/vmalloc.h> | ||
218 | #define __io_virt(x) ((void __force *) (x)) | ||
219 | |||
#ifndef CONFIG_GENERIC_IOMAP
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

/* Nothing was really mapped by pci_iomap() here, so unmapping is a no-op. */
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif /* CONFIG_GENERIC_IOMAP */
228 | |||
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial: they just defer to the arch's
 * __pa()/__va() linear-mapping helpers.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
242 | |||
/*
 * Map a physical MMIO range for CPU access.  In this generic
 * emulation I/O memory is assumed to be directly addressable, so the
 * physical address is returned unchanged as the cookie and @size is
 * ignored.  (The previous comment about "struct page" was a leftover
 * and did not describe this function.)
 */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return (void __iomem*) (unsigned long)offset;
}
250 | |||
/* The flags argument is ignored: all mappings behave like plain ioremap(). */
#define __ioremap(offset, size, flags) ioremap(offset, size)

#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_nocache
#endif
260 | |||
261 | static inline void iounmap(void *addr) | ||
262 | { | ||
263 | } | ||
264 | |||
#ifndef CONFIG_GENERIC_IOMAP
/* A port-space cookie is just the port number cast to a pointer. */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *) port;
}

/* Nothing was mapped, so there is nothing to unmap. */
static inline void ioport_unmap(void __iomem *p)
{
}
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
278 | |||
/* Address translation for /dev/kmem and /dev/mem accesses: identity. */
#define xlate_dev_kmem_ptr(p) p
#define xlate_dev_mem_ptr(p) ((void *) (p))

/* Bus addresses equal virtual addresses unless the arch overrides these. */
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
	return ((unsigned long) address);
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *) address;
}
#endif

/*
 * Block operations on I/O memory; implemented with the ordinary
 * memset/memcpy since MMIO is plain memory in this emulation.
 */
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
297 | |||
298 | #endif /* __KERNEL__ */ | ||
299 | |||
300 | #endif /* __ASM_GENERIC_IO_H */ | ||